1mod blame_verifier;
6pub mod confirmation_meter;
7pub mod consensus_executor;
8pub mod consensus_new_block_handler;
9use cfxcore_errors::ProviderBlockError;
10use cfxcore_pow as pow;
11
12use pow::{PowComputer, ProofOfWorkConfig};
13
14use crate::{
15 block_data_manager::{
16 BlockDataManager, BlockExecutionResultWithEpoch, DataVersionTuple,
17 EpochExecutionContext,
18 },
19 consensus::{
20 anticone_cache::AnticoneCache,
21 consensus_inner::consensus_executor::ConsensusExecutor,
22 debug_recompute::log_invalid_state_root, pastset_cache::PastSetCache,
23 pos_handler::PosVerifier,
24 },
25 pos::pow_handler::POS_TERM_EPOCHS,
26 state_exposer::{ConsensusGraphBlockExecutionState, STATE_EXPOSER},
27 verification::VerificationConfig,
28};
29use cfx_internal_common::{
30 consensus_api::StateMaintenanceTrait, EpochExecutionCommitment,
31};
32use cfx_parameters::{consensus::*, consensus_internal::*};
33use cfx_types::{H256, U256, U512};
34use dag::{
35 get_future, topological_sort, Graph, RichDAG, RichTreeGraph, TreeGraph, DAG,
36};
37use hashbrown::HashMap as FastHashMap;
38use hibitset::{BitSet, BitSetLike, DrainableBitSet};
39use link_cut_tree::{CaterpillarMinLinkCutTree, SizeMinLinkCutTree};
40use malloc_size_of::{MallocSizeOf, MallocSizeOfOps};
41use malloc_size_of_derive::MallocSizeOf as DeriveMallocSizeOf;
42use metrics::{Counter, CounterUsize};
43use primitives::{
44 pos::PosBlockId, Block, BlockHeader, BlockHeaderBuilder, EpochId,
45};
46use slab::Slab;
47use std::{
48 cmp::{max, min},
49 collections::{BinaryHeap, HashMap, HashSet, VecDeque},
50 convert::TryFrom,
51 mem,
52 sync::Arc,
53};
lazy_static! {
    // Counts pivot blocks whose blame field or deferred state root disagrees
    // with the locally recomputed value; exported in the "system_metrics"
    // group as "invalid_blame_or_state_root_count".
    static ref INVALID_BLAME_OR_STATE_ROOT_COUNTER: Arc<dyn Counter<usize>> =
        CounterUsize::register_with_group(
            "system_metrics",
            "invalid_blame_or_state_root_count"
        );
}
61
/// Tunable parameters of the inner consensus graph (GHAST/Tree-Graph rules,
/// PoS pivot-decision deferral, and debugging/recovery switches).
#[derive(Clone)]
pub struct ConsensusInnerConfig {
    /// Beta parameter of the adaptive-weight (GHAST) rule.
    pub adaptive_weight_beta: u64,
    /// Difficulty multiplier a block needs to count as a heavy block.
    pub heavy_block_difficulty_ratio: u64,
    /// Difficulty multiplier a block needs to count as a timer-chain block.
    pub timer_chain_block_difficulty_ratio: u64,
    /// Beta parameter of the timer-chain rule.
    pub timer_chain_beta: u64,
    /// Number of epochs per era.
    pub era_epoch_count: u64,
    /// Whether epochs may be executed optimistically ahead of confirmation.
    pub enable_optimistic_execution: bool,
    /// Whether execution states are exposed via the state exposer.
    pub enable_state_expose: bool,
    /// Epochs by which a PoS pivot decision is deferred (pre-CIP-113 value).
    pub pos_pivot_decision_defer_epoch_count: u64,

    /// Deferral used once the CIP-113 transition height is reached.
    pub cip113_pivot_decision_defer_epoch_count: u64,
    /// Height at which CIP-113 activates.
    pub cip113_transition_height: u64,

    /// If set, dump debug data for invalid state roots into this directory.
    pub debug_dump_dir_invalid_state_root: Option<String>,
    /// If set, only debug the invalid state root of this particular epoch.
    pub debug_invalid_state_root_epoch: Option<H256>,
    /// If set, force recomputation from this height while constructing the
    /// pivot chain (recovery/debug aid).
    pub force_recompute_height_during_construct_pivot: Option<u64>,
    /// Recover the latest MPT snapshot on startup.
    pub recovery_latest_mpt_snapshot: bool,
    /// Keep the MPT table in an isolated database.
    pub use_isolated_db_for_mpt_table: bool,
}
97
98impl ConsensusInnerConfig {
99 pub fn pos_pivot_decision_defer_epoch_count(
100 &self, confirmed_height: u64,
101 ) -> u64 {
102 if confirmed_height >= self.cip113_transition_height {
103 self.cip113_pivot_decision_defer_epoch_count
104 } else {
105 self.pos_pivot_decision_defer_epoch_count
106 }
107 }
108}
109
/// Blame-related commitments carried by a pivot block: the blame count and
/// the vector roots over deferred state roots, receipts roots, and logs
/// bloom roots.
#[derive(Copy, Clone, DeriveMallocSizeOf)]
pub struct StateBlameInfo {
    /// Number of preceding pivot blocks this block blames.
    pub blame: u32,
    /// Root over the blamed deferred state roots.
    pub state_vec_root: H256,
    /// Root over the blamed deferred receipts roots.
    pub receipts_vec_root: H256,
    /// Root over the blamed deferred logs bloom roots.
    pub logs_bloom_vec_root: H256,
}
117
/// Mutable, consensus-computed bookkeeping attached to each arena node
/// (as opposed to the immutable header-derived fields in
/// `ConsensusGraphNode`).
#[derive(DeriveMallocSizeOf)]
pub struct ConsensusGraphNodeData {
    /// Epoch this block currently belongs to in the pivot view.
    pub epoch_number: u64,
    /// Block failed a partial validity check.
    partial_invalid: bool,
    /// Block validity cannot be decided yet.
    pending: bool,
    /// Number of not-yet-activated blocks this block depends on
    /// (presumably parent/referees — TODO confirm at the update sites).
    inactive_dependency_cnt: usize,
    /// Block has been activated in the consensus graph.
    activated: bool,
    /// Arena index of this block's force-confirm point (NULL if unset).
    force_confirm: usize,
    /// Arena indices of blocks in this block's own epoch view
    /// (excluding the block itself).
    blockset_in_own_view_of_epoch: Vec<usize>,
    /// Topologically ordered executable blocks of this epoch; the last
    /// entry is always the pivot block itself.
    ordered_executable_epoch_blocks: Vec<usize>,
    /// Hashes of epoch blocks excluded from execution (over the per-epoch
    /// bound, or from a different era).
    skipped_epoch_blocks: Vec<H256>,
    /// True when the three sets above need recomputation.
    blockset_cleared: bool,
    /// Monotonic insertion order of the block into the graph.
    sequence_number: u64,
    /// Accumulated timer-chain difficulty in this block's past view.
    past_view_timer_longest_difficulty: i128,
    /// Arena index of the last timer block in this block's past view.
    past_view_last_timer_block_arena_index: usize,
    /// Timer-chain height in this block's ledger view.
    ledger_view_timer_chain_height: u64,
    /// Height of the LCA used when `vote_valid` was last evaluated.
    vote_valid_lca_height: u64,
    /// Whether this block's vote is considered valid (defaults to true).
    vote_valid: bool,
    /// Height of the last pivot block in this block's past.
    last_pivot_in_past: u64,
    /// Whether the state root this block commits to is valid; `None`
    /// until computed.
    pub state_valid: Option<bool>,
    /// Recomputed blame commitments; `None` until computed.
    blame_info: Option<StateBlameInfo>,
}
199
200impl ConsensusGraphNodeData {
201 fn new(
202 epoch_number: u64, sequence_number: u64, inactive_dependency_cnt: usize,
203 ) -> Self {
204 ConsensusGraphNodeData {
205 epoch_number,
206 partial_invalid: false,
207 pending: false,
208 inactive_dependency_cnt,
209 activated: false,
210 force_confirm: NULL,
211 blockset_in_own_view_of_epoch: Default::default(),
212 ordered_executable_epoch_blocks: Default::default(),
213 skipped_epoch_blocks: Default::default(),
214 blockset_cleared: true,
215 sequence_number,
216 past_view_timer_longest_difficulty: 0,
217 past_view_last_timer_block_arena_index: NULL,
218 ledger_view_timer_chain_height: 0,
219 vote_valid_lca_height: NULLU64,
220 vote_valid: true,
221 last_pivot_in_past: 0,
222 state_valid: None,
223 blame_info: None,
224 }
225 }
226}
227
/// Per-pivot-chain-position metadata maintained alongside `pivot_chain`.
#[derive(DeriveMallocSizeOf)]
struct ConsensusGraphPivotData {
    /// Arena indices of blocks whose `last_pivot_in_past` points at this
    /// pivot position.
    last_pivot_in_past_blocks: HashSet<usize>,
    /// Total weight of the pivot chain up to and including this position.
    past_weight: i128,
}
237
238impl Default for ConsensusGraphPivotData {
239 fn default() -> Self {
240 ConsensusGraphPivotData {
241 last_pivot_in_past_blocks: HashSet::new(),
242 past_weight: 0,
243 }
244 }
245}
246
/// The inner, lock-free state of the consensus graph: the block arena, the
/// pivot and timer chains, the link-cut trees used for weight queries, and
/// the various caches that accelerate anticone/pastset computation.
pub struct ConsensusGraphInner {
    /// Shared block/epoch data store.
    pub data_man: Arc<BlockDataManager>,
    /// Verifier for PoS references carried in headers.
    pub pos_verifier: Arc<PosVerifier>,
    pub inner_conf: ConsensusInnerConfig,
    pub pow_config: ProofOfWorkConfig,
    pub pow: Arc<PowComputer>,
    /// Arena of all known blocks; indices into this slab are used everywhere.
    pub arena: Slab<ConsensusGraphNode>,
    /// Block hash -> arena index.
    pub hash_to_arena_indices: FastHashMap<H256, usize>,
    /// Current pivot chain, indexed from the era genesis.
    pivot_chain: Vec<usize>,
    /// Metadata parallel to `pivot_chain`.
    pivot_chain_metadata: Vec<ConsensusGraphPivotData>,
    /// Current timer chain (arena indices).
    timer_chain: Vec<usize>,
    /// Accumulative LCA checkpoints along the timer chain.
    timer_chain_accumulative_lca: Vec<usize>,
    /// Hashes of blocks with no known children (graph tips).
    terminal_hashes: HashSet<H256>,
    /// Arena index of the current era genesis.
    cur_era_genesis_block_arena_index: usize,
    /// Height of the current era genesis.
    cur_era_genesis_height: u64,
    /// Height of the current stable block.
    cur_era_stable_height: u64,
    /// Hash of the current stable block.
    cur_era_stable_block_hash: H256,
    /// Future set of the stable block tracked during initial recovery;
    /// `None` once no longer needed.
    initial_stable_future: Option<BitSet>,
    /// Timer-chain height at the era genesis.
    cur_era_genesis_timer_chain_height: u64,
    /// Accumulated difficulty of the best-known timer chain.
    best_timer_chain_difficulty: i128,
    /// Tip hash of the best-known timer chain.
    best_timer_chain_hash: H256,

    /// Best PoS pivot decision seen so far as (block hash, height).
    best_pos_pivot_decision: (H256, u64),

    /// Link-cut tree holding subtree weights (also answers ancestor/LCA
    /// queries).
    weight_tree: SizeMinLinkCutTree,
    /// Link-cut tree used by the adaptive-weight (GHAST) test.
    adaptive_tree: CaterpillarMinLinkCutTree,
    /// Invalid blocks pending processing, ordered by the i128 key
    /// (presumably a weight/priority — TODO confirm at the push sites).
    invalid_block_queue: BinaryHeap<(i128, usize)>,
    pub current_difficulty: U256,
    /// Cache of per-block anticone sets.
    anticone_cache: AnticoneCache,
    /// Cache of per-block past sets.
    pastset_cache: PastSetCache,
    /// Next sequence number to hand out on block insertion.
    sequence_number_of_block_entrance: u64,

    /// Cached LCA heights of best-terminal computations.
    best_terminals_lca_height_cache: FastHashMap<usize, u64>,
    /// Height below which the best terminals were last reorganized.
    best_terminals_reorg_height: u64,
    /// Blocks known to have a timer block in their anticone.
    has_timer_block_in_anticone_cache: HashSet<usize>,

    /// True while only headers (no bodies/states) are being maintained.
    header_only: bool,
}
537
impl MallocSizeOf for ConsensusGraphInner {
    /// Sums the heap usage of the owned containers, trees and caches.
    /// NOTE(review): `pow`, `pos_verifier`, `inner_conf` and a few scalar
    /// fields are not counted here — confirm that omission is intended.
    fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
        self.arena.size_of(ops)
            + self.hash_to_arena_indices.size_of(ops)
            + self.pivot_chain.size_of(ops)
            + self.pivot_chain_metadata.size_of(ops)
            + self.timer_chain.size_of(ops)
            + self.timer_chain_accumulative_lca.size_of(ops)
            + self.terminal_hashes.size_of(ops)
            + self.initial_stable_future.size_of(ops)
            + self.weight_tree.size_of(ops)
            + self.adaptive_tree.size_of(ops)
            + self.invalid_block_queue.size_of(ops)
            + self.pow_config.size_of(ops)
            + self.data_man.size_of(ops)
            + self.anticone_cache.size_of(ops)
            + self.pastset_cache.size_of(ops)
            + self.best_terminals_lca_height_cache.size_of(ops)
            + self.best_terminals_reorg_height.size_of(ops)
    }
}
559
/// A block node in the consensus arena. These fields are derived from the
/// block header at insertion time; the consensus-computed, mutable state
/// lives in `data` (`ConsensusGraphNodeData`).
#[derive(DeriveMallocSizeOf)]
pub struct ConsensusGraphNode {
    pub hash: H256,
    pub height: u64,
    /// Arena index of the parent block (NULL if outside the graph).
    pub parent: usize,
    difficulty: U256,
    /// Whether the block qualifies as a heavy block.
    is_heavy: bool,
    /// Whether the block qualifies as a timer-chain block.
    is_timer: bool,
    /// Total number of blocks in this block's past (including itself).
    past_num_blocks: u64,
    /// Whether the block was mined under the adaptive-weight rule.
    adaptive: bool,

    /// Arena index of this block's era genesis (NULL for legacy blocks).
    era_block: usize,
    /// Arena indices of direct children.
    children: Vec<usize>,
    /// Arena indices of blocks that reference this block.
    referrers: Vec<usize>,
    /// Arena indices of blocks this block references.
    referees: Vec<usize>,
    pub data: ConsensusGraphNodeData,
}
585
586impl ConsensusGraphNode {
587 pub fn past_num_blocks(&self) -> u64 { self.past_num_blocks }
588
589 pub fn adaptive(&self) -> bool { self.adaptive }
590
591 pub fn pending(&self) -> bool { self.data.pending }
592
593 pub fn partial_invalid(&self) -> bool { self.data.partial_invalid }
594
595 pub fn era_block(&self) -> usize { self.era_block }
596}
597
598impl ConsensusGraphInner {
    /// Creates a `ConsensusGraphInner` that initially contains only the era
    /// genesis block identified by `cur_era_genesis_block_hash`.
    ///
    /// The genesis block's weight, epoch number, timer-chain and pivot-chain
    /// bookkeeping are restored from `data_man` so the graph can be extended
    /// incrementally afterwards. Panics if the genesis/stable headers or the
    /// genesis execution context are missing from the data manager.
    pub fn with_era_genesis(
        pow_config: ProofOfWorkConfig, pow: Arc<PowComputer>,
        pos_verifier: Arc<PosVerifier>, data_man: Arc<BlockDataManager>,
        inner_conf: ConsensusInnerConfig, cur_era_genesis_block_hash: &H256,
        cur_era_stable_block_hash: &H256,
    ) -> Self {
        let genesis_block_header = data_man
            .block_header_by_hash(cur_era_genesis_block_hash)
            .expect("genesis block header should exist here");
        let cur_era_genesis_height = genesis_block_header.height();
        let stable_block_header = data_man
            .block_header_by_hash(cur_era_stable_block_hash)
            .expect("stable genesis block header should exist here");
        let cur_era_stable_height = stable_block_header.height();
        let initial_difficulty = pow_config.initial_difficulty;
        // Start from an empty graph; the genesis block is inserted below.
        let mut inner = ConsensusGraphInner {
            arena: Slab::new(),
            hash_to_arena_indices: FastHashMap::new(),
            pivot_chain: Vec::new(),
            pivot_chain_metadata: Vec::new(),
            timer_chain: Vec::new(),
            timer_chain_accumulative_lca: Vec::new(),
            terminal_hashes: Default::default(),
            cur_era_genesis_block_arena_index: NULL,
            cur_era_genesis_height,
            cur_era_stable_height,
            cur_era_stable_block_hash: cur_era_stable_block_hash.clone(),
            initial_stable_future: Some(BitSet::new()),
            cur_era_genesis_timer_chain_height: 0,
            best_timer_chain_difficulty: 0,
            best_timer_chain_hash: Default::default(),
            best_pos_pivot_decision: (
                *cur_era_genesis_block_hash,
                cur_era_genesis_height,
            ),
            weight_tree: SizeMinLinkCutTree::new(),
            adaptive_tree: CaterpillarMinLinkCutTree::new(),
            invalid_block_queue: BinaryHeap::new(),
            pow_config,
            pow,
            current_difficulty: initial_difficulty.into(),
            data_man: data_man.clone(),
            pos_verifier,
            inner_conf,
            anticone_cache: AnticoneCache::new(),
            pastset_cache: Default::default(),
            sequence_number_of_block_entrance: 0,
            best_terminals_lca_height_cache: Default::default(),
            best_terminals_reorg_height: NULLU64,
            has_timer_block_in_anticone_cache: Default::default(),
            header_only: true,
        };

        let (genesis_arena_index, _) = inner.insert(&genesis_block_header);
        // When era genesis and stable block coincide, the stable future
        // starts with the genesis itself.
        if cur_era_genesis_block_hash == cur_era_stable_block_hash {
            inner
                .initial_stable_future
                .as_mut()
                .unwrap()
                .add(genesis_arena_index as u32);
        }
        inner.arena[genesis_arena_index].data.blockset_cleared = false;
        // Only the true (height-0) genesis has a trivially valid state.
        if genesis_block_header.height() == 0 {
            inner.arena[genesis_arena_index].data.state_valid = Some(true);
        }
        inner.cur_era_genesis_block_arena_index = genesis_arena_index;
        inner.arena[genesis_arena_index].data.activated = true;
        // Seed both link-cut trees with the genesis block's weight.
        let genesis_block_weight = genesis_block_header.difficulty().low_u128();
        inner
            .weight_tree
            .make_tree(inner.cur_era_genesis_block_arena_index);
        inner.weight_tree.path_apply(
            inner.cur_era_genesis_block_arena_index,
            genesis_block_weight as i128,
        );
        inner
            .adaptive_tree
            .make_tree(inner.cur_era_genesis_block_arena_index);
        inner
            .adaptive_tree
            .set(inner.cur_era_genesis_block_arena_index, 0);
        inner.arena[inner.cur_era_genesis_block_arena_index]
            .data
            .epoch_number = cur_era_genesis_height;
        // Restore past_num_blocks from the persisted execution context plus
        // the size of the genesis epoch's executed block set.
        let genesis_epoch_size = inner
            .data_man
            .executed_epoch_set_hashes_from_db(cur_era_genesis_height)
            .expect("Genesis epoch set should be in data manager.")
            .len();
        inner.arena[inner.cur_era_genesis_block_arena_index].past_num_blocks =
            inner
                .data_man
                .get_epoch_execution_context(cur_era_genesis_block_hash)
                .expect("ExecutionContext for cur_era_genesis exists")
                .start_block_number
                + genesis_epoch_size as u64
                - 1;
        inner.arena[inner.cur_era_genesis_block_arena_index]
            .data
            .last_pivot_in_past = cur_era_genesis_height;
        // The pivot chain initially consists of the genesis alone.
        inner
            .pivot_chain
            .push(inner.cur_era_genesis_block_arena_index);
        let mut last_pivot_in_past_blocks = HashSet::new();
        last_pivot_in_past_blocks
            .insert(inner.cur_era_genesis_block_arena_index);
        inner.pivot_chain_metadata.push(ConsensusGraphPivotData {
            last_pivot_in_past_blocks,
            past_weight: genesis_block_weight as i128,
        });
        if inner.arena[inner.cur_era_genesis_block_arena_index].is_timer {
            inner
                .timer_chain
                .push(inner.cur_era_genesis_block_arena_index);
        }
        inner.arena[inner.cur_era_genesis_block_arena_index]
            .data
            .ledger_view_timer_chain_height = 0;
        inner.best_timer_chain_difficulty =
            inner.get_timer_difficulty(inner.cur_era_genesis_block_arena_index);

        // The genesis has an empty anticone by definition.
        inner
            .anticone_cache
            .update(inner.cur_era_genesis_block_arena_index, &BitSet::new());

        inner
    }
734
735 fn persist_epoch_set_hashes(&mut self, pivot_index: usize) {
736 let height = self.pivot_index_to_height(pivot_index);
737 let arena_index = self.pivot_chain[pivot_index];
738 let epoch_set_hashes = self
739 .get_ordered_executable_epoch_blocks(arena_index)
740 .iter()
741 .map(|arena_index| self.arena[*arena_index].hash)
742 .collect();
743 let skipped_set_hashes = self
744 .get_or_compute_skipped_epoch_blocks(arena_index)
745 .clone();
746 self.data_man
747 .insert_executed_epoch_set_hashes_to_db(height, &epoch_set_hashes);
748 self.data_man
749 .insert_skipped_epoch_set_hashes_to_db(height, &skipped_set_hashes);
750 }
751
752 #[inline]
753 pub fn current_era_genesis_seq_num(&self) -> u64 {
754 self.arena[self.cur_era_genesis_block_arena_index]
755 .data
756 .sequence_number
757 }
758
759 #[inline]
760 pub fn get_pivot_block_arena_index(&self, height: u64) -> usize {
763 let pivot_index = (height - self.cur_era_genesis_height) as usize;
764 assert!(pivot_index < self.pivot_chain.len());
765 self.pivot_chain[pivot_index]
766 }
767
768 #[inline]
769 pub fn get_pivot_height(&self) -> u64 {
770 self.cur_era_genesis_height + self.pivot_chain.len() as u64
771 }
772
773 #[inline]
774 pub fn height_to_pivot_index(&self, height: u64) -> usize {
775 (height - self.cur_era_genesis_height) as usize
776 }
777
778 #[inline]
779 pub fn pivot_index_to_height(&self, pivot_index: usize) -> u64 {
780 self.cur_era_genesis_height + pivot_index as u64
781 }
782
783 #[inline]
784 fn get_next_sequence_number(&mut self) -> u64 {
785 let sn = self.sequence_number_of_block_entrance;
786 self.sequence_number_of_block_entrance += 1;
787 sn
788 }
789
790 #[inline]
791 pub fn set_initial_sequence_number(&mut self, initial_sn: u64) {
792 self.arena[self.cur_era_genesis_block_arena_index]
793 .data
794 .sequence_number = initial_sn;
795 self.sequence_number_of_block_entrance = initial_sn + 1;
796 }
797
798 #[inline]
799 fn is_heavier(a: (i128, &H256), b: (i128, &H256)) -> bool {
800 (a.0 > b.0) || ((a.0 == b.0) && (*a.1 > *b.1))
801 }
802
803 #[inline]
804 fn ancestor_at(&self, me: usize, height: u64) -> usize {
805 let height_index = self.height_to_pivot_index(height);
806 self.weight_tree.ancestor_at(me, height_index)
807 }
808
809 #[inline]
810 fn lca(&self, me: usize, v: usize) -> usize {
812 if self.arena[v].era_block == NULL || self.arena[me].era_block == NULL {
813 return NULL;
814 }
815 self.weight_tree.lca(me, v)
816 }
817
818 #[inline]
819 fn get_era_genesis_height(&self, parent_height: u64) -> u64 {
820 parent_height / self.inner_conf.era_epoch_count
821 * self.inner_conf.era_epoch_count
822 }
823
824 #[inline]
825 pub fn get_cur_era_genesis_height(&self) -> u64 {
826 self.cur_era_genesis_height
827 }
828
829 #[inline]
830 fn get_era_genesis_block_with_parent(&self, parent: usize) -> usize {
831 if parent == NULL {
832 return 0;
833 }
834 let height = self.arena[parent].height;
835 let era_genesis_height = self.get_era_genesis_height(height);
836 trace!(
837 "height={} era_height={} era_genesis_height={}",
838 height,
839 era_genesis_height,
840 self.cur_era_genesis_height
841 );
842 self.ancestor_at(parent, era_genesis_height)
843 }
844
845 #[inline]
846 pub fn get_epoch_block_hashes(
847 &self, epoch_arena_index: usize,
848 ) -> Vec<H256> {
849 self.get_ordered_executable_epoch_blocks(epoch_arena_index)
850 .iter()
851 .map(|idx| self.arena[*idx].hash)
852 .collect()
853 }
854
855 #[inline]
856 fn get_epoch_start_block_number(&self, epoch_arena_index: usize) -> u64 {
857 let parent = self.arena[epoch_arena_index].parent;
858
859 return self.arena[parent].past_num_blocks + 1;
860 }
861
862 #[inline]
863 fn is_legacy_block(&self, index: usize) -> bool {
864 self.arena[index].era_block == NULL
865 }
866
    /// Fills `blockset_in_own_view_of_epoch` of `pivot` with every block in
    /// `pivot`'s past that lies outside the past set of `lca`.
    ///
    /// Walks the parental path from `lca` (exclusive) up to `pivot` and, for
    /// each ancestor whose blockset is cleared (and for `pivot` itself),
    /// BFS-expands referees/parents bounded by `lca`'s past set; ancestors
    /// with a cached blockset only contribute their members to `visited`.
    /// Precondition: the pastset cache contains `lca`.
    fn compute_blockset_in_own_view_of_epoch_impl(
        &mut self, lca: usize, pivot: usize,
    ) {
        let pastset = self.pastset_cache.get(lca).unwrap();
        let mut path_to_lca = Vec::new();
        let mut cur = pivot;
        while cur != lca {
            path_to_lca.push(cur);
            cur = self.arena[cur].parent;
        }
        // Process ancestors bottom-up (oldest first) so cached blocksets of
        // earlier ancestors are marked visited before later ones expand.
        path_to_lca.reverse();
        let mut visited = BitSet::new();
        for ancestor_arena_index in path_to_lca {
            visited.add(ancestor_arena_index as u32);
            if ancestor_arena_index == pivot
                || self.arena[ancestor_arena_index].data.blockset_cleared
            {
                // Seed the BFS with this ancestor's referees.
                let mut queue = VecDeque::new();
                for referee in &self.arena[ancestor_arena_index].referees {
                    if !pastset.contains(*referee as u32)
                        && !visited.contains(*referee as u32)
                    {
                        visited.add(*referee as u32);
                        queue.push_back(*referee);
                    }
                }
                while let Some(index) = queue.pop_front() {
                    // Only blocks discovered while expanding `pivot` itself
                    // belong to `pivot`'s own epoch view.
                    if ancestor_arena_index == pivot {
                        self.arena[pivot]
                            .data
                            .blockset_in_own_view_of_epoch
                            .push(index);
                    }
                    let parent = self.arena[index].parent;
                    if parent != NULL
                        && !pastset.contains(parent as u32)
                        && !visited.contains(parent as u32)
                    {
                        visited.add(parent as u32);
                        queue.push_back(parent);
                    }
                    for referee in &self.arena[index].referees {
                        if !pastset.contains(*referee as u32)
                            && !visited.contains(*referee as u32)
                        {
                            visited.add(*referee as u32);
                            queue.push_back(*referee);
                        }
                    }
                }
            } else {
                // Cached blockset: its members are already accounted for.
                for index in &self.arena[ancestor_arena_index]
                    .data
                    .blockset_in_own_view_of_epoch
                {
                    visited.add(*index as u32);
                }
            }
        }
    }
941
    /// (Re)computes the epoch block sets of `pivot`:
    /// `blockset_in_own_view_of_epoch`, the topologically ordered
    /// `ordered_executable_epoch_blocks` (capped at
    /// `EPOCH_EXECUTED_BLOCK_BOUND`, always ending with `pivot` itself),
    /// and the `skipped_epoch_blocks` hashes. No-op unless the sets were
    /// previously cleared.
    fn compute_blockset_in_own_view_of_epoch(&mut self, pivot: usize) {
        if !self.arena[pivot].data.blockset_cleared {
            return;
        }
        let parent = self.arena[pivot].parent;
        if parent != NULL {
            let last = *self.pivot_chain.last().unwrap();
            let lca = self.lca(last, parent);
            assert!(lca != NULL);
            // Ensure the LCA's past set is cached before the bounded
            // traversal below relies on it.
            if self.pastset_cache.get_and_update_cache(lca).is_none() {
                let pastset = self.compute_pastset_brutal(lca);
                self.pastset_cache.update(lca, pastset);
            }
            self.compute_blockset_in_own_view_of_epoch_impl(lca, pivot);
        }

        // Blocks from a different era are never executable in this epoch.
        let mut filtered_blockset = HashSet::new();
        let mut different_era_blocks = Vec::new();
        for idx in &self.arena[pivot].data.blockset_in_own_view_of_epoch {
            if self.is_same_era(*idx, pivot) {
                filtered_blockset.insert(*idx);
            } else {
                different_era_blocks.push(*idx);
            }
        }

        let mut ordered_executable_epoch_blocks = self
            .topological_sort_with_order_indicator(filtered_blockset, |i| {
                self.arena[i].hash
            });
        ordered_executable_epoch_blocks.push(pivot);
        // Enforce the per-epoch execution bound: keep only the last
        // EPOCH_EXECUTED_BLOCK_BOUND blocks and skip the earlier ones,
        // together with all different-era blocks.
        let skipped_epoch_block_indices = if ordered_executable_epoch_blocks
            .len()
            > EPOCH_EXECUTED_BLOCK_BOUND
        {
            let cut_off = ordered_executable_epoch_blocks.len()
                - EPOCH_EXECUTED_BLOCK_BOUND;
            let mut skipped_epoch_block_indices =
                ordered_executable_epoch_blocks;
            ordered_executable_epoch_blocks =
                skipped_epoch_block_indices.split_off(cut_off);
            skipped_epoch_block_indices.append(&mut different_era_blocks);
            skipped_epoch_block_indices
        } else {
            different_era_blocks
        };

        self.arena[pivot].data.skipped_epoch_blocks =
            skipped_epoch_block_indices
                .into_iter()
                .map(|i| self.arena[i].hash)
                .collect();
        self.arena[pivot].data.ordered_executable_epoch_blocks =
            ordered_executable_epoch_blocks;
        self.arena[pivot].data.blockset_cleared = false;
    }
1008
1009 #[inline]
1010 fn exchange_or_compute_blockset_in_own_view_of_epoch(
1011 &mut self, index: usize, blockset_opt: Option<Vec<usize>>,
1012 ) -> Vec<usize> {
1013 if let Some(blockset) = blockset_opt {
1014 mem::replace(
1015 &mut self.arena[index].data.blockset_in_own_view_of_epoch,
1016 blockset,
1017 )
1018 } else {
1019 if self.arena[index].data.blockset_cleared {
1020 self.compute_blockset_in_own_view_of_epoch(index);
1021 }
1022 mem::replace(
1023 &mut self.arena[index].data.blockset_in_own_view_of_epoch,
1024 Default::default(),
1025 )
1026 }
1027 }
1028
1029 #[inline]
1030 pub fn get_ordered_executable_epoch_blocks(
1031 &self, index: usize,
1032 ) -> &Vec<usize> {
1033 &self.arena[index].data.ordered_executable_epoch_blocks
1034 }
1035
1036 #[inline]
1037 pub fn get_or_compute_skipped_epoch_blocks(
1038 &mut self, index: usize,
1039 ) -> &Vec<H256> {
1040 if self.arena[index].data.blockset_cleared {
1041 self.compute_blockset_in_own_view_of_epoch(index);
1042 }
1043 &self.arena[index].data.skipped_epoch_blocks
1044 }
1045
1046 #[inline]
1047 pub fn get_skipped_epoch_blocks(&self, index: usize) -> Option<&Vec<H256>> {
1048 if self.arena[index].data.blockset_cleared {
1049 None
1050 } else {
1051 Some(&self.arena[index].data.skipped_epoch_blocks)
1052 }
1053 }
1054
1055 fn get_blame(&self, arena_index: usize) -> u32 {
1056 let block_header = self
1057 .data_man
1058 .block_header_by_hash(&self.arena[arena_index].hash)
1059 .unwrap();
1060 block_header.blame()
1061 }
1062
1063 fn get_blame_with_pivot_index(&self, pivot_index: usize) -> u32 {
1064 let arena_index = self.pivot_chain[pivot_index];
1065 self.get_blame(arena_index)
1066 }
1067
1068 pub fn find_first_index_with_correct_state_of(
1069 &self, pivot_index: usize, blame_bound: Option<u32>,
1070 min_vote_count: usize,
1071 ) -> Option<usize> {
1072 let from = pivot_index + DEFERRED_STATE_EPOCH_COUNT as usize;
1075
1076 self.find_first_trusted_starting_from(from, blame_bound, min_vote_count)
1077 }
1078
1079 pub fn find_first_trusted_starting_from(
1080 &self, from: usize, blame_bound: Option<u32>, min_vote_count: usize,
1081 ) -> Option<usize> {
1082 let mut trusted_index = match self
1083 .find_first_with_trusted_blame_starting_from(
1084 from,
1085 blame_bound,
1086 min_vote_count,
1087 ) {
1088 None => return None,
1089 Some(index) => index,
1090 };
1091
1092 while trusted_index != from {
1095 let blame = self.get_blame_with_pivot_index(trusted_index);
1096 let prev_trusted = trusted_index - blame as usize - 1;
1097
1098 if prev_trusted < from {
1099 break;
1100 }
1101
1102 trusted_index = prev_trusted;
1103 }
1104
1105 Some(trusted_index)
1106 }
1107
1108 fn find_first_with_trusted_blame_starting_from(
1109 &self, pivot_index: usize, blame_bound: Option<u32>,
1110 min_vote_count: usize,
1111 ) -> Option<usize> {
1112 trace!(
1113 "find_first_with_trusted_blame_starting_from pivot_index={:?}",
1114 pivot_index
1115 );
1116 let mut cur_pivot_index = pivot_index;
1117 while cur_pivot_index < self.pivot_chain.len() {
1118 let arena_index = self.pivot_chain[cur_pivot_index];
1119 let blame_ratio = self.compute_blame_ratio(
1120 arena_index,
1121 blame_bound,
1122 min_vote_count,
1123 );
1124 trace!(
1125 "blame_ratio for {:?} is {}",
1126 self.arena[arena_index].hash,
1127 blame_ratio
1128 );
1129 if blame_ratio < MAX_BLAME_RATIO_FOR_TRUST {
1130 return Some(cur_pivot_index);
1131 }
1132 cur_pivot_index += 1;
1133 }
1134
1135 None
1136 }
1137
1138 fn compute_blame_ratio(
1140 &self, arena_index: usize, blame_bound: Option<u32>,
1141 min_vote_count: usize,
1142 ) -> f64 {
1143 let blame_bound = if let Some(bound) = blame_bound {
1144 bound
1145 } else {
1146 u32::max_value()
1147 };
1148 let mut total_blame_count = 0 as u64;
1149 let mut queue = VecDeque::new();
1150 let mut votes = HashMap::new();
1151 queue.push_back((arena_index, 0 as u32));
1152 while let Some((index, step)) = queue.pop_front() {
1153 if index == arena_index {
1154 votes.insert(index, true);
1155 } else {
1156 let mut my_blame = self.get_blame(index);
1157 let mut parent = index;
1158 loop {
1159 parent = self.arena[parent].parent;
1160 if my_blame == 0 {
1161 let parent_vote = *votes.get(&parent).unwrap();
1162 votes.insert(index, parent_vote);
1163 if !parent_vote {
1164 total_blame_count += 1;
1165 }
1166 break;
1167 } else if parent == arena_index {
1168 votes.insert(index, false);
1169 total_blame_count += 1;
1170 break;
1171 }
1172 my_blame -= 1;
1173 }
1174 }
1175
1176 if step == blame_bound {
1177 continue;
1178 }
1179
1180 let next_step = step + 1;
1181 for child in &self.arena[index].children {
1182 queue.push_back((*child, next_step));
1183 }
1184 }
1185
1186 let total_vote_count = votes.len();
1187
1188 if total_vote_count < min_vote_count {
1189 return 1.0;
1191 }
1192
1193 total_blame_count as f64 / total_vote_count as f64
1196 }
1197
    /// Checks whether a block mined on top of `parent_arena_index` with the
    /// given referees, difficulty and PoS reference would have to be marked
    /// adaptive under the GHAST rule.
    pub fn check_mining_adaptive_block(
        &mut self, parent_arena_index: usize, referee_indices: Vec<usize>,
        difficulty: U256, pos_reference: Option<PosBlockId>,
    ) -> bool {
        // Build anticone(parent): cache + parent's future when available,
        // otherwise brute-force plus the future set.
        let parent_anticone_opt = self.anticone_cache.get(parent_arena_index);
        let mut anticone;
        if parent_anticone_opt.is_none() {
            anticone = consensus_new_block_handler::ConsensusNewBlockHandler::compute_anticone_bruteforce(
                self, parent_arena_index,
            );
            for i in self.compute_future_bitset(parent_arena_index) {
                anticone.add(i);
            }
        } else {
            anticone = self.compute_future_bitset(parent_arena_index);
            for index in parent_anticone_opt.unwrap() {
                anticone.add(*index as u32);
            }
        }
        // The new block's past also covers everything reachable from its
        // referees through blocks lying in the anticone (or legacy blocks);
        // those are not in the new block's anticone.
        let mut my_past = BitSet::new();
        let mut queue: VecDeque<usize> = VecDeque::new();
        for index in &referee_indices {
            queue.push_back(*index);
        }
        while let Some(index) = queue.pop_front() {
            if my_past.contains(index as u32) {
                continue;
            }
            my_past.add(index as u32);
            let idx_parent = self.arena[index].parent;
            if idx_parent != NULL {
                if anticone.contains(idx_parent as u32)
                    || self.arena[idx_parent].era_block == NULL
                {
                    queue.push_back(idx_parent);
                }
            }
            for referee in &self.arena[index].referees {
                if anticone.contains(*referee as u32)
                    || self.arena[*referee].era_block == NULL
                {
                    queue.push_back(*referee);
                }
            }
        }
        for index in my_past.drain() {
            anticone.remove(index);
        }

        // The anticone barrier keeps only the topmost (parent outside the
        // anticone) non-legacy anticone blocks.
        let mut anticone_barrier = BitSet::new();
        for index in (&anticone).iter() {
            let parent = self.arena[index as usize].parent as u32;
            if self.arena[index as usize].era_block != NULL
                && !anticone.contains(parent)
            {
                anticone_barrier.add(index);
            }
        }

        let timer_chain_tuple = self.compute_timer_chain_tuple(
            parent_arena_index,
            &referee_indices,
            Some(&anticone),
        );

        self.adaptive_weight_impl(
            parent_arena_index,
            &anticone_barrier,
            None,
            &timer_chain_tuple,
            i128::try_from(difficulty.low_u128()).unwrap(),
            pos_reference,
        )
    }
1273
    /// Computes the total subtree weight of every arena index by an
    /// iterative pre/post-order DFS from the era genesis, excluding the new
    /// block `me` and any subtree rooted in `anticone_barrier`.
    /// Returned vector is indexed by arena index (sized to the arena
    /// capacity; untouched slots stay 0).
    fn compute_subtree_weights(
        &self, me: usize, anticone_barrier: &BitSet,
    ) -> Vec<i128> {
        let mut subtree_weight = Vec::new();
        let n = self.arena.capacity();
        subtree_weight.resize_with(n, Default::default);
        let mut stack = Vec::new();
        stack.push((0, self.cur_era_genesis_block_arena_index));
        while let Some((stage, index)) = stack.pop() {
            if stage == 0 {
                // First visit: re-push as a post-order marker, then descend
                // into eligible children.
                stack.push((1, index));
                subtree_weight[index] = 0;
                for child in &self.arena[index].children {
                    if !anticone_barrier.contains(*child as u32) && *child != me
                    {
                        stack.push((0, *child));
                    }
                }
            } else {
                // Post-order: fold the block's own weight and propagate the
                // accumulated subtree weight to the parent.
                let weight = self.block_weight(index);
                subtree_weight[index] += weight;
                let parent = self.arena[index].parent;
                if parent != NULL {
                    subtree_weight[parent] += subtree_weight[index];
                }
            }
        }
        subtree_weight
    }
1322
1323 fn get_best_timer_tick(
1324 &self,
1325 timer_chain_tuple: &(u64, HashMap<usize, u64>, Vec<usize>, Vec<usize>),
1326 ) -> u64 {
1327 let (fork_at, _, _, c) = timer_chain_tuple;
1328 *fork_at + c.len() as u64
1329 }
1330
1331 fn get_timer_tick(
1332 &self, me: usize,
1333 timer_chain_tuple: &(u64, HashMap<usize, u64>, Vec<usize>, Vec<usize>),
1334 ) -> u64 {
1335 let (fork_at, m, _, _) = timer_chain_tuple;
1336 if let Some(t) = m.get(&me) {
1337 return *t;
1338 } else {
1339 assert!(
1340 self.arena[me].data.ledger_view_timer_chain_height <= *fork_at
1341 );
1342 }
1343 return self.arena[me].data.ledger_view_timer_chain_height;
1344 }
1345
    /// Brute-force adaptive-weight check using precomputed `subtree_weight`
    /// values (no link-cut tree queries): walks the parental chain from
    /// `parent_0` down to the force-confirm height and reports adaptive as
    /// soon as a timer-eligible ancestor has insufficient GHAST weight.
    fn adaptive_weight_impl_brutal(
        &self, parent_0: usize, subtree_weight: &Vec<i128>,
        timer_chain_tuple: &(u64, HashMap<usize, u64>, Vec<usize>, Vec<usize>),
        force_confirm: usize, difficulty: i128,
    ) -> bool {
        let mut parent = parent_0;

        let force_confirm_height = self.arena[force_confirm].height;
        let timer_me = self.get_best_timer_tick(timer_chain_tuple);

        // Threshold scaled by the new block's difficulty.
        let adjusted_beta =
            (self.inner_conf.adaptive_weight_beta as i128) * difficulty;

        let mut adaptive = false;
        while self.arena[parent].height != force_confirm_height {
            let grandparent = self.arena[parent].parent;
            let timer_parent = self.get_timer_tick(parent, timer_chain_tuple);
            assert!(timer_me >= timer_parent);
            // Only ancestors old enough on the timer chain participate in
            // the adaptiveness test.
            if timer_me - timer_parent >= self.inner_conf.timer_chain_beta {
                // GHAST condition: 2*w(parent) - w(grandparent) + weight of
                // the grandparent block itself must reach adjusted_beta.
                let w = 2 * subtree_weight[parent]
                    - subtree_weight[grandparent]
                    + self.block_weight(grandparent);
                if w < adjusted_beta {
                    adaptive = true;
                    break;
                }
            }
            parent = grandparent;
        }

        adaptive
    }
1378
    /// Decides whether a block with parent `parent_0` must be adaptive.
    ///
    /// If `weight_tuple` (precomputed subtree weights) is given, delegates
    /// to the brute-force variant. Otherwise it temporarily subtracts the
    /// anticone-barrier weights from the two link-cut trees, binary-searches
    /// the deepest timer-eligible ancestor, evaluates the aggregated GHAST
    /// condition on the path to the force-confirm block, and restores the
    /// tree weights before returning.
    fn adaptive_weight_impl(
        &mut self, parent_0: usize, anticone_barrier: &BitSet,
        weight_tuple: Option<&Vec<i128>>,
        timer_chain_tuple: &(u64, HashMap<usize, u64>, Vec<usize>, Vec<usize>),
        difficulty: i128, pos_reference: Option<PosBlockId>,
    ) -> bool {
        let mut parent = parent_0;
        let force_confirm =
            self.compute_block_force_confirm(timer_chain_tuple, pos_reference);
        let force_confirm_height = self.arena[force_confirm].height;
        // A parent that does not descend from the force-confirm block can
        // never be adaptive (it is invalid for other reasons).
        if self.arena[parent].height < force_confirm_height
            || self.ancestor_at(parent, force_confirm_height) != force_confirm
        {
            return false;
        }
        if let Some(subtree_weight) = weight_tuple {
            return self.adaptive_weight_impl_brutal(
                parent_0,
                subtree_weight,
                timer_chain_tuple,
                force_confirm,
                difficulty,
            );
        }

        // Temporarily remove the weight of each anticone-barrier subtree
        // from both trees; restored symmetrically at the end.
        let mut weight_delta = HashMap::new();

        for index in anticone_barrier.iter() {
            assert!(!self.is_legacy_block(index as usize));
            weight_delta
                .insert(index as usize, self.weight_tree.get(index as usize));
        }

        for (index, delta) in &weight_delta {
            self.weight_tree.path_apply(*index, -*delta);
            let parent = self.arena[*index].parent;
            assert!(parent != NULL);
            self.adaptive_tree.caterpillar_apply(parent, *delta);
            self.adaptive_tree.path_apply(*index, -*delta * 2);
        }

        let timer_me = self.get_best_timer_tick(timer_chain_tuple);
        let adjusted_beta = self.inner_conf.timer_chain_beta;

        // Binary search for the deepest ancestor whose timer tick lags the
        // new block by at least timer_chain_beta.
        let mut high = self.arena[parent].height;
        let mut low = force_confirm_height + 1;
        let mut best = force_confirm_height;

        while low <= high {
            let mid = (low + high) / 2;
            let p = self.ancestor_at(parent, mid);
            let timer_mid = self.get_timer_tick(p, timer_chain_tuple);
            assert!(timer_me >= timer_mid);
            if timer_me - timer_mid >= adjusted_beta {
                best = mid;
                low = mid + 1;
            } else {
                high = mid - 1;
            }
        }

        let adaptive = if best != force_confirm_height {
            parent = self.ancestor_at(parent, best);

            // Aggregated GHAST weight along the path; below the threshold
            // means the block must be adaptive.
            let a = self
                .adaptive_tree
                .path_aggregate_chop(parent, force_confirm);
            let b = self.inner_conf.adaptive_weight_beta as i128 * difficulty;

            if a < b {
                debug!("block is adaptive: {:?} < {:?}!", a, b);
            } else {
                debug!("block is not adaptive: {:?} >= {:?}!", a, b);
            }
            a < b
        } else {
            debug!(
                "block is not adaptive: too close to genesis, timer tick {:?}",
                timer_me
            );
            false
        };

        // Restore the tree weights removed above.
        for (index, delta) in &weight_delta {
            self.weight_tree.path_apply(*index, *delta);
            let parent = self.arena[*index].parent;
            self.adaptive_tree.caterpillar_apply(parent, -*delta);
            self.adaptive_tree.path_apply(*index, *delta * 2)
        }

        adaptive
    }
1474
1475 fn adaptive_weight(
1478 &mut self, me: usize, anticone_barrier: &BitSet,
1479 weight_tuple: Option<&Vec<i128>>,
1480 timer_chain_tuple: &(u64, HashMap<usize, u64>, Vec<usize>, Vec<usize>),
1481 ) -> bool {
1482 let parent = self.arena[me].parent;
1483 assert!(parent != NULL);
1484
1485 let difficulty =
1486 i128::try_from(self.arena[me].difficulty.low_u128()).unwrap();
1487
1488 self.adaptive_weight_impl(
1489 parent,
1490 anticone_barrier,
1491 weight_tuple,
1492 timer_chain_tuple,
1493 difficulty,
1494 self.data_man
1495 .pos_reference_by_hash(&self.arena[me].hash)
1496 .expect("header exist"),
1497 )
1498 }
1499
1500 #[inline]
1501 fn is_same_era(&self, me: usize, pivot: usize) -> bool {
1502 self.arena[me].era_block == self.arena[pivot].era_block
1503 }
1504
    /// Compute the past set of `me` (all blocks reachable via parent and
    /// referee edges) as a `BitSet` of arena indices, reusing the nearest
    /// cached past set on the parent chain as a starting point.
    fn compute_pastset_brutal(&mut self, me: usize) -> BitSet {
        let mut path = Vec::new();
        let mut cur = me;
        // Walk up the parent chain until a cached past set (or the root)
        // is reached; the blocks passed on the way still need processing.
        while cur != NULL && self.pastset_cache.get(cur).is_none() {
            path.push(cur);
            cur = self.arena[cur].parent;
        }
        // Process from the oldest uncached ancestor downwards.
        path.reverse();
        // Seed with the cached past set, or empty if none was found.
        let mut result = self
            .pastset_cache
            .get(cur)
            .unwrap_or(&BitSet::new())
            .clone();
        for ancestor_arena_index in path {
            result.add(ancestor_arena_index as u32);
            if self.arena[ancestor_arena_index].data.blockset_cleared {
                // The epoch blockset was evicted: fall back to a BFS over
                // parent/referee edges, bounded by what `result` already
                // contains.
                let mut queue = VecDeque::new();
                queue.push_back(ancestor_arena_index);
                while let Some(index) = queue.pop_front() {
                    let parent = self.arena[index].parent;
                    if parent != NULL && !result.contains(parent as u32) {
                        result.add(parent as u32);
                        queue.push_back(parent);
                    }
                    for referee in &self.arena[index].referees {
                        if !result.contains(*referee as u32) {
                            result.add(*referee as u32);
                            queue.push_back(*referee);
                        }
                    }
                }
            } else {
                // Blockset is available: take it out, merge it in, then
                // put it back so the stored blockset is preserved.
                let blockset = self
                    .exchange_or_compute_blockset_in_own_view_of_epoch(
                        ancestor_arena_index,
                        None,
                    );
                for index in &blockset {
                    result.add(*index as u32);
                }
                self.exchange_or_compute_blockset_in_own_view_of_epoch(
                    ancestor_arena_index,
                    Some(blockset),
                );
            }
        }
        result
    }
1553
1554 fn insert_referee_if_not_duplicate(
1561 &self, referees: &mut Vec<usize>, me: usize,
1562 ) {
1563 for i in 0..referees.len() {
1564 let x = referees[i];
1565 let lca = self.lca(x, me);
1566 if lca == me {
1567 return;
1569 } else if lca == x {
1570 referees[i] = me;
1572 return;
1573 }
1574 }
1575 referees.push(me)
1576 }
1577
    /// Insert a block belonging to an era outside the current one into the
    /// arena, returning its sequence number and arena index.
    ///
    /// The node is marked pending and inactive, and is not linked to an
    /// era genesis (`era_block == NULL`). Returns `(sn, NULL)` when
    /// neither its parent nor any referee is known to the arena.
    pub fn insert_out_era_block(
        &mut self, block_header: &BlockHeader, partial_invalid: bool,
    ) -> (u64, usize) {
        let sn = self.get_next_sequence_number();
        let hash = block_header.hash();
        // The parent may itself be out of the arena; fall back to NULL.
        let parent = self
            .hash_to_arena_indices
            .get(block_header.parent_hash())
            .cloned()
            .unwrap_or(NULL);

        let mut referees: Vec<usize> = Vec::new();
        for hash in block_header.referee_hashes().iter() {
            if let Some(x) = self.hash_to_arena_indices.get(hash) {
                self.insert_referee_if_not_duplicate(&mut referees, *x);
            }
        }

        // Completely disconnected from the maintained graph: skip insert.
        if parent == NULL && referees.is_empty() {
            return (sn, NULL);
        }

        // Count referee dependencies that have not been activated yet.
        let mut inactive_dependency_cnt = 0;
        for referee in &referees {
            if !self.arena[*referee].data.activated {
                inactive_dependency_cnt += 1;
            }
        }

        let index = self.arena.insert(ConsensusGraphNode {
            hash,
            height: block_header.height(),
            is_heavy: true,
            difficulty: *block_header.difficulty(),
            past_num_blocks: 0,
            is_timer: false,
            adaptive: block_header.adaptive(),
            parent,
            era_block: NULL,
            children: Vec::new(),
            referees,
            referrers: Vec::new(),
            data: ConsensusGraphNodeData::new(
                NULLU64,
                sn,
                inactive_dependency_cnt,
            ),
        });
        self.arena[index].data.pending = true;
        self.arena[index].data.activated = false;
        self.arena[index].data.partial_invalid = partial_invalid;
        self.hash_to_arena_indices.insert(hash, index);

        // Wire up the reverse edges (referrers and children).
        let referees = self.arena[index].referees.clone();
        for referee in referees {
            self.arena[referee].referrers.push(index);
        }
        if parent != NULL {
            self.arena[parent].children.push(index);
        }

        self.weight_tree.make_tree(index);
        self.adaptive_tree.make_tree(index);

        (sn, index)
    }
1652
1653 fn get_timer_difficulty(&self, me: usize) -> i128 {
1654 if self.arena[me].is_timer && !self.arena[me].data.partial_invalid {
1655 i128::try_from(self.arena[me].difficulty.low_u128()).unwrap()
1656 } else {
1657 0
1658 }
1659 }
1660
1661 fn compute_global_force_confirm(&self) -> usize {
1662 let timer_chain_choice =
1663 if let Some(x) = self.timer_chain_accumulative_lca.last() {
1664 *x
1665 } else {
1666 self.cur_era_genesis_block_arena_index
1667 };
1668 self.compute_force_confirm(
1669 timer_chain_choice,
1670 &self.best_pos_pivot_decision,
1671 )
1672 }
1673
    /// Compute the force-confirm block for a (possibly forked) timer chain
    /// described by `timer_chain_tuple`, taking the block's PoS reference
    /// into account when present.
    fn compute_block_force_confirm(
        &self,
        timer_chain_tuple: &(u64, HashMap<usize, u64>, Vec<usize>, Vec<usize>),
        pos_reference: Option<PosBlockId>,
    ) -> usize {
        let (fork_at, _, extra_lca, tmp_chain) = timer_chain_tuple;
        // Index one past the end of the timer chain once the fork part is
        // appended.
        let fork_end_index =
            (*fork_at - self.cur_era_genesis_timer_chain_height) as usize
                + tmp_chain.len();
        let acc_lca_ref = extra_lca;
        // Prefer the fork's own accumulative LCA; otherwise use the stored
        // accumulative LCA `timer_chain_beta` entries back, or fall back
        // to the era genesis when the chain is still too short.
        let timer_chain_choice = if let Some(x) = acc_lca_ref.last() {
            *x
        } else if fork_end_index > self.inner_conf.timer_chain_beta as usize {
            self.timer_chain_accumulative_lca
                [fork_end_index - self.inner_conf.timer_chain_beta as usize - 1]
        } else {
            self.cur_era_genesis_block_arena_index
        };
        match pos_reference {
            None => timer_chain_choice,
            Some(pos_reference) => {
                // Combine the timer-chain choice with the PoS pivot
                // decision referenced by the block.
                let pos_pivot_decision = self
                    .pos_verifier
                    .get_pivot_decision(&pos_reference)
                    .expect("pos_reference checked");
                self.compute_force_confirm(
                    timer_chain_choice,
                    &(
                        pos_pivot_decision,
                        self.data_man
                            .block_height_by_hash(&pos_pivot_decision)
                            .expect("pos pivot decision checked"),
                    ),
                )
            }
        }
    }
1711
1712 fn compute_force_confirm(
1713 &self, timer_chain_choice: usize, pos_pivot_decision: &(H256, u64),
1714 ) -> usize {
1715 if let Some(arena_index) =
1716 self.hash_to_arena_indices.get(&pos_pivot_decision.0)
1717 {
1718 if self.arena[timer_chain_choice].height > pos_pivot_decision.1
1719 && self.lca(timer_chain_choice, *arena_index) == *arena_index
1720 {
1721 timer_chain_choice
1724 } else {
1725 *arena_index
1728 }
1729 } else {
1730 timer_chain_choice
1733 }
1734 }
1735
    /// Insert a block header into the consensus arena, returning the new
    /// arena index and the total number of indexed blocks.
    fn insert(&mut self, block_header: &BlockHeader) -> (usize, usize) {
        let hash = block_header.hash();

        // Classify the block from its PoW quality: heavy/timer status
        // requires exceeding the corresponding difficulty-ratio threshold.
        let pow_quality =
            U512::from(VerificationConfig::get_or_compute_header_pow_quality(
                &self.pow,
                block_header,
            ));
        let is_heavy = pow_quality
            >= U512::from(self.inner_conf.heavy_block_difficulty_ratio)
                * U512::from(block_header.difficulty());
        let is_timer = pow_quality
            >= U512::from(self.inner_conf.timer_chain_block_difficulty_ratio)
                * U512::from(block_header.difficulty());

        // Only the era genesis itself may lack a parent here; any other
        // block's parent must already be in the arena.
        let parent =
            if hash != self.data_man.get_cur_consensus_era_genesis_hash() {
                self.hash_to_arena_indices
                    .get(block_header.parent_hash())
                    .cloned()
                    .unwrap()
            } else {
                NULL
            };

        let mut referees: Vec<usize> = Vec::new();
        for hash in block_header.referee_hashes().iter() {
            if let Some(x) = self.hash_to_arena_indices.get(hash) {
                self.insert_referee_if_not_duplicate(&mut referees, *x);
            }
        }

        // Count direct dependencies (parent + referees) not yet activated.
        let mut inactive_dependency_cnt =
            if parent != NULL && !self.arena[parent].data.activated {
                1
            } else {
                0
            };
        for referee in &referees {
            if !self.arena[*referee].data.activated {
                inactive_dependency_cnt += 1;
            }
        }

        let my_height = block_header.height();
        let sn = self.get_next_sequence_number();
        let index = self.arena.insert(ConsensusGraphNode {
            hash,
            height: my_height,
            is_heavy,
            difficulty: *block_header.difficulty(),
            past_num_blocks: 0,
            is_timer,
            adaptive: block_header.adaptive(),
            parent,
            era_block: self.get_era_genesis_block_with_parent(parent),
            children: Vec::new(),
            referees,
            referrers: Vec::new(),
            data: ConsensusGraphNodeData::new(
                NULLU64,
                sn,
                inactive_dependency_cnt,
            ),
        });
        self.hash_to_arena_indices.insert(hash, index);

        // Wire up the reverse edges (children and referrers).
        if parent != NULL {
            self.arena[parent].children.push(index);
        }
        let referees = self.arena[index].referees.clone();
        for referee in referees {
            self.arena[referee].referrers.push(index);
        }

        self.compute_blockset_in_own_view_of_epoch(index);
        let executed_epoch_len =
            self.get_ordered_executable_epoch_blocks(index).len();

        if parent != NULL {
            // Cumulative block count: parent's count plus this epoch's
            // executable blocks.
            let past_num_blocks =
                self.arena[parent].past_num_blocks + executed_epoch_len as u64;

            self.data_man.insert_epoch_execution_context(
                hash.clone(),
                EpochExecutionContext {
                    start_block_number: self
                        .get_epoch_start_block_number(index),
                },
                true, );

            self.arena[index].past_num_blocks = past_num_blocks;
        }

        debug!(
            "Block {} inserted into Consensus with index={}",
            hash, index
        );

        (index, self.hash_to_arena_indices.len())
    }
1840
1841 fn compute_future_bitset(&self, me: usize) -> BitSet {
1842 let mut queue: VecDeque<usize> = VecDeque::new();
1844 let mut visited = BitSet::with_capacity(self.arena.len() as u32);
1845 queue.push_back(me);
1846 visited.add(me as u32);
1847 while let Some(index) = queue.pop_front() {
1848 for child in &self.arena[index].children {
1849 if !visited.contains(*child as u32)
1850 && (self.arena[*child].data.activated
1851 || self.arena[*child].data.inactive_dependency_cnt
1852 == NULL)
1853 {
1855 visited.add(*child as u32);
1856 queue.push_back(*child);
1857 }
1858 }
1859 for referrer in &self.arena[index].referrers {
1860 if !visited.contains(*referrer as u32)
1861 && (self.arena[*referrer].data.activated
1862 || self.arena[*referrer].data.inactive_dependency_cnt
1863 == NULL)
1864 {
1866 visited.add(*referrer as u32);
1867 queue.push_back(*referrer);
1868 }
1869 }
1870 }
1871 visited.remove(me as u32);
1872 visited
1873 }
1874
1875 pub fn get_pivot_reward_index(
1901 &self, epoch_arena_index: usize,
1902 ) -> Option<(usize, usize)> {
1903 if self.arena[epoch_arena_index].height <= REWARD_EPOCH_COUNT {
1905 return None;
1906 }
1907 let parent_index = self.arena[epoch_arena_index].parent;
1908 let anticone_cut_height =
1910 REWARD_EPOCH_COUNT - ANTICONE_PENALTY_UPPER_EPOCH_COUNT;
1911 let mut anticone_penalty_cutoff_epoch_block = parent_index;
1912 for _i in 1..anticone_cut_height {
1913 if anticone_penalty_cutoff_epoch_block == NULL {
1914 break;
1915 }
1916 anticone_penalty_cutoff_epoch_block =
1917 self.arena[anticone_penalty_cutoff_epoch_block].parent;
1918 }
1919 let mut reward_epoch_block = anticone_penalty_cutoff_epoch_block;
1920 for _i in 0..ANTICONE_PENALTY_UPPER_EPOCH_COUNT {
1921 if reward_epoch_block == NULL {
1922 break;
1923 }
1924 reward_epoch_block = self.arena[reward_epoch_block].parent;
1925 }
1926 if reward_epoch_block != NULL {
1927 while !self.is_same_era(
1929 reward_epoch_block,
1930 anticone_penalty_cutoff_epoch_block,
1931 ) {
1932 anticone_penalty_cutoff_epoch_block =
1933 self.arena[anticone_penalty_cutoff_epoch_block].parent;
1934 }
1935 }
1936 let reward_index = if reward_epoch_block == NULL {
1937 None
1938 } else {
1939 Some((reward_epoch_block, anticone_penalty_cutoff_epoch_block))
1940 };
1941 reward_index
1942 }
1943
1944 fn get_executable_epoch_blocks(
1945 &self, epoch_arena_index: usize,
1946 ) -> Vec<Arc<Block>> {
1947 let mut epoch_blocks = Vec::new();
1948 for idx in self.get_ordered_executable_epoch_blocks(epoch_arena_index) {
1949 let block = self
1950 .data_man
1951 .block_by_hash(
1952 &self.arena[*idx].hash,
1953 true, )
1955 .expect("Exist");
1956 epoch_blocks.push(block);
1957 }
1958 epoch_blocks
1959 }
1960
1961 pub fn expected_difficulty(&self, parent_hash: &H256) -> U256 {
1980 let parent_arena_index =
1981 *self.hash_to_arena_indices.get(parent_hash).unwrap();
1982 let parent_epoch = self.arena[parent_arena_index].height;
1983 if parent_epoch
1984 < self
1985 .pow_config
1986 .difficulty_adjustment_epoch_period(parent_epoch)
1987 {
1988 self.pow_config.initial_difficulty.into()
1990 } else {
1991 let last_period_upper = (parent_epoch
1992 / self
1993 .pow_config
1994 .difficulty_adjustment_epoch_period(parent_epoch))
1995 * self
1996 .pow_config
1997 .difficulty_adjustment_epoch_period(parent_epoch);
1998 if last_period_upper != parent_epoch {
1999 self.arena[parent_arena_index].difficulty
2000 } else {
2001 self.data_man.target_difficulty_manager.target_difficulty(
2002 self,
2003 &self.pow_config,
2005 &self.arena[parent_arena_index].hash,
2006 )
2007 }
2008 }
2009 }
2010
2011 fn adjust_difficulty(&mut self, new_best_arena_index: usize) {
2012 let new_best_hash = self.arena[new_best_arena_index].hash.clone();
2013 let new_best_difficulty = self.arena[new_best_arena_index].difficulty;
2014 let old_best_arena_index = *self.pivot_chain.last().expect("not empty");
2015 if old_best_arena_index == self.arena[new_best_arena_index].parent {
2016 assert!(self.current_difficulty == new_best_difficulty);
2018 }
2019
2020 let epoch = self.arena[new_best_arena_index].height;
2021 if epoch == 0 {
2022 self.current_difficulty = self.pow_config.initial_difficulty.into();
2025 } else if epoch
2026 == (epoch
2027 / self.pow_config.difficulty_adjustment_epoch_period(epoch))
2028 * self.pow_config.difficulty_adjustment_epoch_period(epoch)
2029 {
2030 let this: &ConsensusGraphInner = self;
2031 self.current_difficulty = self
2032 .data_man
2033 .target_difficulty_manager
2034 .target_difficulty(this, &self.pow_config, &new_best_hash);
2035 } else {
2036 self.current_difficulty = new_best_difficulty;
2037 }
2038 }
2039
2040 pub fn best_block_hash(&self) -> H256 {
2041 self.arena[*self.pivot_chain.last().unwrap()].hash
2042 }
2043
2044 pub fn best_block_number(&self) -> u64 {
2045 self.arena[*self.pivot_chain.last().unwrap()].past_num_blocks
2046 }
2047
2048 pub fn best_state_epoch_number(&self) -> u64 {
2053 let pivot_height = self.pivot_index_to_height(self.pivot_chain.len());
2054 if pivot_height < DEFERRED_STATE_EPOCH_COUNT {
2055 0
2056 } else {
2057 pivot_height - DEFERRED_STATE_EPOCH_COUNT
2058 }
2059 }
2060
2061 fn best_state_arena_index(&self) -> usize {
2062 self.get_pivot_block_arena_index(self.best_state_epoch_number())
2063 }
2064
2065 pub fn best_state_block_hash(&self) -> H256 {
2066 self.arena[self.best_state_arena_index()].hash
2067 }
2068
2069 pub fn get_state_block_with_delay(
2070 &self, block_hash: &H256, delay: usize,
2071 ) -> Result<&H256, String> {
2072 let idx_opt = self.hash_to_arena_indices.get(block_hash);
2073 if idx_opt == None {
2074 return Err(
2075 "Parent hash is too old for computing the deferred state"
2076 .to_owned(),
2077 );
2078 }
2079 let mut idx = *idx_opt.unwrap();
2080 for _i in 0..delay {
2081 trace!(
2082 "get_state_block_with_delay: idx={}, height={}",
2083 idx,
2084 self.arena[idx].height
2085 );
2086 if idx == self.cur_era_genesis_block_arena_index {
2087 if self.arena[self.cur_era_genesis_block_arena_index].height
2089 == 0
2090 {
2091 break;
2092 } else {
2093 return Err(
2094 "Parent is too old for computing the deferred state"
2095 .to_owned(),
2096 );
2097 }
2098 }
2099 idx = self.arena[idx].parent;
2100 }
2101 Ok(&self.arena[idx].hash)
2102 }
2103
2104 pub fn best_epoch_number(&self) -> u64 {
2105 self.cur_era_genesis_height + self.pivot_chain.len() as u64 - 1
2106 }
2107
2108 pub fn best_timer_chain_height(&self) -> u64 {
2109 self.cur_era_genesis_timer_chain_height + self.timer_chain.len() as u64
2110 - 1
2111 }
2112
2113 fn get_arena_index_from_epoch_number(
2114 &self, epoch_number: u64,
2115 ) -> Result<usize, String> {
2116 if epoch_number >= self.cur_era_genesis_height {
2117 let pivot_index =
2118 (epoch_number - self.cur_era_genesis_height) as usize;
2119 if pivot_index >= self.pivot_chain.len() {
2120 Err("Epoch number larger than the current pivot chain tip"
2121 .into())
2122 } else {
2123 Ok(self.get_pivot_block_arena_index(epoch_number))
2124 }
2125 } else {
2126 Err("Invalid params: epoch number is too old and not maintained by consensus graph".to_owned())
2127 }
2128 }
2129
2130 pub fn get_pivot_hash_from_epoch_number(
2134 &self, epoch_number: u64,
2135 ) -> Result<EpochId, ProviderBlockError> {
2136 let height = epoch_number;
2137 if height >= self.cur_era_genesis_height {
2138 let pivot_index = (height - self.cur_era_genesis_height) as usize;
2139 if pivot_index >= self.pivot_chain.len() {
2140 Err("Epoch number larger than the current pivot chain tip"
2141 .into())
2142 } else {
2143 Ok(self.arena[self.get_pivot_block_arena_index(height)].hash)
2144 }
2145 } else {
2146 self.data_man.executed_epoch_set_hashes_from_db(epoch_number).ok_or(
2147 format!("get_hash_from_epoch_number: Epoch hash set not in db, epoch_number={}", epoch_number).into()
2148 ).and_then(|epoch_hashes|
2149 epoch_hashes.last().map(Clone::clone).ok_or("Epoch set is empty".into())
2150 )
2151 }
2152 }
2153
2154 pub fn epoch_hash(&self, epoch_number: u64) -> Option<H256> {
2157 let pivot_index = self.height_to_pivot_index(epoch_number);
2158 self.pivot_chain
2159 .get(pivot_index)
2160 .map(|idx| self.arena[*idx].hash)
2161 }
2162
2163 pub fn block_hashes_by_epoch(
2164 &self, epoch_number: u64,
2165 ) -> Result<Vec<H256>, ProviderBlockError> {
2166 debug!(
2167 "block_hashes_by_epoch epoch_number={:?} pivot_chain.len={:?}",
2168 epoch_number,
2169 self.pivot_chain.len()
2170 );
2171
2172 let e;
2173 match self.get_arena_index_from_epoch_number(epoch_number) {
2177 Ok(pivot_arena_index) => {
2178 if pivot_arena_index != self.cur_era_genesis_block_arena_index {
2179 return Ok(self
2180 .get_ordered_executable_epoch_blocks(pivot_arena_index)
2181 .iter()
2182 .map(|index| self.arena[*index].hash)
2183 .collect());
2184 }
2185 e = "Epoch set of the current genesis is not maintained".into();
2186 }
2187 Err(err) => e = err,
2188 }
2189
2190 self.data_man
2191 .executed_epoch_set_hashes_from_db(epoch_number)
2192 .ok_or(
2193 format!(
2194 "Epoch set not in db epoch_number={}, in mem err={:?}",
2195 epoch_number, e
2196 )
2197 .into(),
2198 )
2199 }
2200
2201 pub fn skipped_block_hashes_by_epoch(
2202 &self, epoch_number: u64,
2203 ) -> Result<Vec<H256>, ProviderBlockError> {
2204 debug!(
2205 "skipped_block_hashes_by_epoch epoch_number={:?} pivot_chain.len={:?}",
2206 epoch_number,
2207 self.pivot_chain.len()
2208 );
2209
2210 let e;
2211 match self.get_arena_index_from_epoch_number(epoch_number) {
2215 Ok(pivot_arena_index) => {
2216 if pivot_arena_index != self.cur_era_genesis_block_arena_index {
2217 if let Some(skipped_block_set) =
2218 self.get_skipped_epoch_blocks(pivot_arena_index)
2219 {
2220 return Ok(skipped_block_set.clone());
2221 }
2222 }
2223 e = "Skipped epoch set of the current genesis is not maintained".into();
2224 }
2225 Err(err) => e = err,
2226 }
2227
2228 self.data_man
2229 .skipped_epoch_set_hashes_from_db(epoch_number)
2230 .ok_or(
2231 format!(
2232 "Skipped epoch set not in db epoch_number={}, in mem err={:?}",
2233 epoch_number, e
2234 )
2235 .into(),
2236 )
2237 }
2238
2239 fn get_epoch_hash_for_block(&self, hash: &H256) -> Option<H256> {
2240 self.get_block_epoch_number(&hash)
2241 .and_then(|epoch_number| self.epoch_hash(epoch_number))
2242 }
2243
2244 pub fn bounded_terminal_block_hashes(
2245 &mut self, referee_bound: usize,
2246 ) -> Vec<H256> {
2247 let best_block_arena_index = *self.pivot_chain.last().unwrap();
2248 if self.terminal_hashes.len() > referee_bound {
2249 self.best_terminals(best_block_arena_index, referee_bound)
2250 } else {
2251 self.terminal_hashes
2252 .iter()
2253 .map(|hash| hash.clone())
2254 .collect()
2255 }
2256 }
2257
2258 pub fn get_block_epoch_number(&self, hash: &H256) -> Option<u64> {
2259 self.hash_to_arena_indices.get(hash).and_then(|index| {
2260 match self.arena[*index].data.epoch_number {
2261 NULLU64 => None,
2262 epoch => Some(epoch),
2263 }
2264 })
2265 }
2266
2267 pub fn all_blocks_with_topo_order(&self) -> Vec<H256> {
2268 let epoch_number = self.best_epoch_number();
2269 let mut current_number = 0;
2270 let mut hashes = Vec::new();
2271 while current_number <= epoch_number {
2272 let epoch_hashes =
2273 self.block_hashes_by_epoch(current_number.into()).unwrap();
2274 for hash in epoch_hashes {
2275 hashes.push(hash);
2276 }
2277 current_number += 1;
2278 }
2279 hashes
2280 }
2281
    /// Fetch the execution results of block `hash` together with the pivot
    /// hash of the epoch it was executed in.
    ///
    /// When the block's epoch is not tracked in memory, the result is read
    /// from the db and only returned if the recorded execution pivot still
    /// matches the current pivot chain (i.e. the result is not stale).
    pub fn block_execution_results_by_hash(
        &self, hash: &H256, update_cache: bool,
    ) -> Option<BlockExecutionResultWithEpoch> {
        match self.get_epoch_hash_for_block(hash) {
            Some(epoch) => {
                trace!("Block {} is in epoch {}", hash, epoch);
                let execution_result =
                    self.data_man.block_execution_result_by_hash_with_epoch(
                        hash,
                        &epoch,
                        false, update_cache,
                    )?;
                Some(DataVersionTuple(epoch, execution_result))
            }
            None => {
                debug!("Block {:?} not in mem, try to read from db", hash);

                let res = match self
                    .data_man
                    .block_execution_result_by_hash_from_db(hash)
                {
                    None => return None,
                    Some(res) => res,
                };

                // Validate the stored result against the current pivot
                // chain: it is only valid if the pivot at that height is
                // still the block it was executed under.
                let execution_pivot_hash = res.0;
                let epoch_number = self
                    .data_man
                    .block_header_by_hash(&execution_pivot_hash)?
                    .height();

                match self.get_pivot_hash_from_epoch_number(epoch_number) {
                    Ok(h) if h == execution_pivot_hash => Some(res),
                    // Pivot changed (or lookup failed): result is stale.
                    _ => None,
                }
            }
        }
    }
2328
2329 pub fn is_timer_block(&self, block_hash: &H256) -> Option<bool> {
2330 self.hash_to_arena_indices
2331 .get(block_hash)
2332 .and_then(|index| Some(self.arena[*index].is_timer))
2333 }
2334
2335 pub fn is_adaptive(&self, block_hash: &H256) -> Option<bool> {
2336 self.hash_to_arena_indices
2337 .get(block_hash)
2338 .and_then(|index| Some(self.arena[*index].adaptive))
2339 }
2340
2341 pub fn is_partial_invalid(&self, block_hash: &H256) -> Option<bool> {
2342 self.hash_to_arena_indices
2343 .get(block_hash)
2344 .and_then(|index| Some(self.arena[*index].data.partial_invalid))
2345 }
2346
2347 pub fn is_pending(&self, block_hash: &H256) -> Option<bool> {
2348 self.hash_to_arena_indices
2349 .get(block_hash)
2350 .and_then(|index| Some(self.arena[*index].data.pending))
2351 }
2352
2353 pub fn check_block_pivot_assumption(
2354 &self, pivot_hash: &H256, epoch: u64,
2355 ) -> Result<(), ProviderBlockError> {
2356 let last_number = self.best_epoch_number();
2357 let hash = self.get_pivot_hash_from_epoch_number(epoch)?;
2358 if epoch > last_number || hash != *pivot_hash {
2359 return Err("Error: pivot chain assumption failed".into());
2360 }
2361 Ok(())
2362 }
2363
2364 fn block_weight(&self, me: usize) -> i128 {
2369 if !self.arena[me].data.activated || self.arena[me].era_block == NULL {
2370 return 0 as i128;
2371 }
2372 let is_heavy = self.arena[me].is_heavy;
2373 let is_adaptive = self.arena[me].adaptive;
2374 if is_adaptive {
2375 if is_heavy {
2376 self.inner_conf.heavy_block_difficulty_ratio as i128
2377 * i128::try_from(self.arena[me].difficulty.low_u128())
2378 .unwrap()
2379 } else {
2380 0 as i128
2381 }
2382 } else {
2383 i128::try_from(self.arena[me].difficulty.low_u128()).unwrap()
2384 }
2385 }
2386
    /// Compute the blame count and the accumulated (vectorized) state,
    /// receipts, and logs-bloom roots that a new block with parent
    /// `parent` must commit to, given the execution results of its own
    /// deferred epoch.
    ///
    /// Walks up the parent chain collecting deferred commitments until it
    /// reaches a block whose state is valid (or one with cached blame
    /// info), filling in `blame_info` for the invalid blocks passed on
    /// the way. Requires `state_valid` to be precomputed by the caller
    /// for the visited blocks.
    fn compute_blame_and_state_with_execution_result(
        &mut self, parent: usize, state_root_hash: H256,
        receipts_root_hash: H256, logs_bloom_hash: H256,
    ) -> Result<StateBlameInfo, String> {
        let mut cur = parent;
        let mut blame_cnt: u32 = 0;
        // Roots accumulated from the new block backwards; index 0 holds
        // the new block's own deferred roots.
        let mut state_blame_vec = Vec::new();
        let mut receipt_blame_vec = Vec::new();
        let mut bloom_blame_vec = Vec::new();
        // Invalid ancestors whose blame_info still needs to be filled.
        let mut blame_info_to_fill = Vec::new();
        state_blame_vec.push(state_root_hash);
        receipt_blame_vec.push(receipts_root_hash);
        bloom_blame_vec.push(logs_bloom_hash);
        loop {
            if self.arena[cur]
                .data
                .state_valid
                .expect("computed by the caller")
            {
                // Reached a valid ancestor. The closest pending invalid
                // block blames exactly this one (blame = 0 relative to
                // the remaining vector), so fill it here.
                if let Some(last_blame_info) = blame_info_to_fill.pop() {
                    self.arena[last_blame_info].data.blame_info =
                        Some(StateBlameInfo {
                            blame: 0,
                            state_vec_root: state_blame_vec
                                .last()
                                .unwrap()
                                .clone(),
                            receipts_vec_root: receipt_blame_vec
                                .last()
                                .unwrap()
                                .clone(),
                            logs_bloom_vec_root: bloom_blame_vec
                                .last()
                                .unwrap()
                                .clone(),
                        });
                    blame_cnt = 1;
                }
                break;
            }

            debug!("compute_blame_and_state_with_execution_result: cur={} height={}", cur, self.arena[cur].height);
            let deferred_arena_index =
                self.get_deferred_state_arena_index(cur)?;
            let deferred_block_commitment = self
                .data_man
                .get_epoch_execution_commitment(
                    &self.arena[deferred_arena_index].hash,
                )
                .ok_or("State block commitment missing")?;
            if let Some(blame_info) = self.arena[cur].data.blame_info {
                // Cached blame info short-circuits the walk.
                blame_cnt = blame_info.blame + 1;
                state_blame_vec.push(blame_info.state_vec_root);
                receipt_blame_vec.push(blame_info.receipts_vec_root);
                bloom_blame_vec.push(blame_info.logs_bloom_vec_root);
                break;
            }
            blame_info_to_fill.push(cur);
            if self.arena[cur].height == self.cur_era_genesis_height {
                return Err(
                    "Failed to compute blame and state due to out of era. The blockchain data is probably corrupted."
                    .to_owned(),
                );
            }
            state_blame_vec.push(
                deferred_block_commitment
                    .state_root_with_aux_info
                    .aux_info
                    .state_root_hash,
            );
            receipt_blame_vec
                .push(deferred_block_commitment.receipts_root.clone());
            bloom_blame_vec
                .push(deferred_block_commitment.logs_bloom_hash.clone());
            cur = self.arena[cur].parent;
        }
        let blame = blame_cnt + blame_info_to_fill.len() as u32;

        if blame > 0 {
            // Fold the collected roots from the oldest entry forwards,
            // caching intermediate blame info on each invalid ancestor.
            let mut accumulated_state_root =
                state_blame_vec.last().unwrap().clone();
            let mut accumulated_receipts_root =
                receipt_blame_vec.last().unwrap().clone();
            let mut accumulated_logs_bloom_root =
                bloom_blame_vec.last().unwrap().clone();
            for i in (0..blame_info_to_fill.len()).rev() {
                accumulated_state_root =
                    BlockHeaderBuilder::compute_blame_state_root_incremental(
                        state_blame_vec[i + 1],
                        accumulated_state_root,
                    );
                accumulated_receipts_root =
                    BlockHeaderBuilder::compute_blame_state_root_incremental(
                        receipt_blame_vec[i + 1],
                        accumulated_receipts_root,
                    );
                accumulated_logs_bloom_root =
                    BlockHeaderBuilder::compute_blame_state_root_incremental(
                        bloom_blame_vec[i + 1],
                        accumulated_logs_bloom_root,
                    );
                self.arena[blame_info_to_fill[i]].data.blame_info =
                    Some(StateBlameInfo {
                        blame: blame_cnt,
                        state_vec_root: accumulated_state_root,
                        receipts_vec_root: accumulated_receipts_root,
                        logs_bloom_vec_root: accumulated_logs_bloom_root,
                    });
                blame_cnt += 1;
            }
            // Final fold with the new block's own deferred roots.
            let state_vec_root =
                BlockHeaderBuilder::compute_blame_state_root_incremental(
                    state_blame_vec[0],
                    accumulated_state_root,
                );
            let receipts_vec_root =
                BlockHeaderBuilder::compute_blame_state_root_incremental(
                    receipt_blame_vec[0],
                    accumulated_receipts_root,
                );
            let logs_bloom_vec_root =
                BlockHeaderBuilder::compute_blame_state_root_incremental(
                    bloom_blame_vec[0],
                    accumulated_logs_bloom_root,
                );
            Ok(StateBlameInfo {
                blame,
                state_vec_root,
                receipts_vec_root,
                logs_bloom_vec_root,
            })
        } else {
            // No blame: commit directly to the deferred roots.
            Ok(StateBlameInfo {
                blame: 0,
                state_vec_root: state_blame_vec.pop().unwrap(),
                receipts_vec_root: receipt_blame_vec.pop().unwrap(),
                logs_bloom_vec_root: bloom_blame_vec.pop().unwrap(),
            })
        }
    }
2609
    /// Determine whether block `me` committed the correct deferred state
    /// (blame, state root, receipts root, logs bloom) in its header, and
    /// record the result in `state_valid` / `blame_info`.
    ///
    /// On mismatch, bumps the invalid-blame metric and, when the debug
    /// configuration asks for it, recomputes and logs the offending state
    /// roots via `log_invalid_state_root`.
    fn compute_state_valid_and_blame_info_for_block(
        &mut self, me: usize, executor: &ConsensusExecutor,
    ) -> Result<(), String> {
        let block_height = self.arena[me].height;
        let block_hash = self.arena[me].hash;
        debug!("compute_state_valid: me={} height={}", me, block_height);
        let deferred_state_arena_index =
            self.get_deferred_state_arena_index(me)?;
        let exec_commitment = self
            .data_man
            .get_epoch_execution_commitment(
                &self.arena[deferred_state_arena_index].hash,
            )
            .expect("Commitment exist");
        let parent = self.arena[me].parent;
        let original_deferred_state_root =
            exec_commitment.state_root_with_aux_info.clone();
        let original_deferred_receipt_root =
            exec_commitment.receipts_root.clone();
        let original_deferred_logs_bloom_hash =
            exec_commitment.logs_bloom_hash.clone();

        // What the header *should* contain, derived from execution.
        let state_blame_info = self
            .compute_blame_and_state_with_execution_result(
                parent,
                original_deferred_state_root
                    .aux_info
                    .state_root_hash
                    .clone(),
                original_deferred_receipt_root.clone(),
                original_deferred_logs_bloom_hash.clone(),
            )?;
        let block_header = self
            .data_man
            .block_header_by_hash(&self.arena[me].hash)
            .unwrap();
        // Valid only if all four committed fields match the computation.
        let state_valid = block_header.blame() == state_blame_info.blame
            && *block_header.deferred_state_root()
                == state_blame_info.state_vec_root
            && *block_header.deferred_receipts_root()
                == state_blame_info.receipts_vec_root
            && *block_header.deferred_logs_bloom_hash()
                == state_blame_info.logs_bloom_vec_root;

        let mut debug_recompute = false;
        if state_valid {
            debug!(
                "compute_state_valid_for_block(): Block {} state/blame is valid.",
                block_hash,
            );
        } else {
            warn!(
                "compute_state_valid_for_block(): Block {:?} state/blame is invalid! \
                header blame {:?}, our blame {:?}, header state_root {:?}, \
                our state root {:?}, header receipt_root {:?}, our receipt root {:?}, \
                header logs_bloom_hash {:?}, our logs_bloom_hash {:?}.",
                block_hash, block_header.blame(), state_blame_info.blame,
                block_header.deferred_state_root(), state_blame_info.state_vec_root,
                block_header.deferred_receipts_root(), state_blame_info.receipts_vec_root,
                block_header.deferred_logs_bloom_hash(), state_blame_info.logs_bloom_vec_root,
            );
            INVALID_BLAME_OR_STATE_ROOT_COUNTER.inc(1);

            if self.inner_conf.debug_dump_dir_invalid_state_root.is_some() {
                debug_recompute = true;
            }
        }
        // A specific epoch can also be forced into debug recompute.
        if let Some(debug_epoch) =
            &self.inner_conf.debug_invalid_state_root_epoch
        {
            if block_hash.eq(debug_epoch) {
                debug_recompute = true;
            }
        }
        if debug_recompute {
            if let Ok(epoch_arena_index) =
                self.get_deferred_state_arena_index(me)
            {
                let state_availability_lower_bound = self
                    .data_man
                    .state_availability_boundary
                    .read()
                    .lower_bound;
                // Only recompute if the required state is still available.
                if state_availability_lower_bound < block_height {
                    log_invalid_state_root(
                        epoch_arena_index,
                        self,
                        executor,
                        block_hash,
                        block_height,
                        &original_deferred_state_root,
                    )
                    .ok();

                    // Walk back through the blamed range to find the
                    // earliest ancestor whose state was NOT already known
                    // invalid — the likely first point of divergence —
                    // and log its state root too.
                    let header_blame = block_header.blame() as u64;
                    let mut block_arena_index = self.arena[me].parent;
                    let mut earliest_mismatch_arena_index = me;
                    for i in 1..header_blame {
                        if block_height - i <= state_availability_lower_bound {
                            break;
                        }
                        if self.arena[block_arena_index].data.state_valid
                            != Some(false)
                        {
                            earliest_mismatch_arena_index = block_arena_index;
                        }
                        block_arena_index =
                            self.arena[block_arena_index].parent;
                    }
                    if earliest_mismatch_arena_index != me {
                        if let Ok(state_epoch_arena_index) = self
                            .get_deferred_state_arena_index(
                                earliest_mismatch_arena_index,
                            )
                        {
                            let block_hash =
                                self.arena[earliest_mismatch_arena_index].hash;
                            let block_height = self.arena
                                [earliest_mismatch_arena_index]
                                .height;
                            let state_root = self
                                .data_man
                                .get_epoch_execution_commitment(
                                    &self.arena[state_epoch_arena_index].hash,
                                )
                                .expect("Commitment exist")
                                .state_root_with_aux_info
                                .clone();
                            log_invalid_state_root(
                                state_epoch_arena_index,
                                self,
                                executor,
                                block_hash,
                                block_height,
                                &state_root,
                            )
                            .ok();
                        }
                    }
                }
            }
        }

        self.arena[me].data.state_valid = Some(state_valid);
        // Cache the computed blame info only for invalid blocks; valid
        // blocks need no correction data.
        if !state_valid {
            self.arena[me].data.blame_info = Some(state_blame_info);
        }

        if self.inner_conf.enable_state_expose {
            STATE_EXPOSER
                .consensus_graph
                .lock()
                .block_execution_state_vec
                .push(ConsensusGraphBlockExecutionState {
                    block_hash,
                    deferred_state_root: original_deferred_state_root
                        .aux_info
                        .state_root_hash,
                    deferred_receipt_root: original_deferred_receipt_root,
                    deferred_logs_bloom_hash: original_deferred_logs_bloom_hash,
                    state_valid: self.arena[me]
                        .data
                        .state_valid
                        .unwrap_or(true),
                })
        }

        Ok(())
    }
2790
    /// Decide whether the blame claims ("votes") carried by block `me` are
    /// consistent with the actual `state_valid` status of the blocks it
    /// blames, evaluated against the chain of `pivot_arena_index`. The result
    /// (and the LCA height it was computed for) is cached in the arena.
    ///
    /// Precondition: the caller has already computed `state_valid` for the
    /// relevant precedents (see the `expect` messages below).
    fn compute_vote_valid_for_pivot_block(
        &mut self, me: usize, pivot_arena_index: usize,
    ) -> bool {
        let lca = self.lca(me, pivot_arena_index);
        let lca_height = self.arena[lca].height;
        debug!(
            "compute_vote_valid_for_pivot_block: lca={}, lca_height={}",
            lca, lca_height
        );
        // Explicit stack to avoid recursion. Entries are
        // (stage, block, ancestor): stage 0 computes or delegates, stage 1
        // copies the result computed for `ancestor`.
        let mut stack = Vec::new();
        stack.push((0, me, 0));
        while !stack.is_empty() {
            let (stage, index, a) = stack.pop().unwrap();
            if stage == 0 {
                // Skip blocks whose vote validity is already cached against
                // this same LCA height.
                if self.arena[index].data.vote_valid_lca_height != lca_height {
                    let header = self
                        .data_man
                        .block_header_by_hash(&self.arena[index].hash)
                        .unwrap();
                    let blame = header.blame();
                    if self.arena[index].height > lca_height + 1 + blame as u64
                    {
                        // The blamed range ends strictly above the LCA, so
                        // this block's validity equals that of its first
                        // unblamed ancestor: compute the ancestor first
                        // (stage 0), then copy the result (stage 1).
                        let ancestor = self.ancestor_at(
                            index,
                            self.arena[index].height - blame as u64 - 1,
                        );
                        stack.push((1, index, ancestor));
                        stack.push((0, ancestor, 0));
                    } else {
                        // The blamed range reaches down to (or past) the LCA.
                        // Walk from the LCA down to the first unblamed
                        // ancestor: every blamed block must really be
                        // state-invalid for the vote to be valid ...
                        let start_height =
                            self.arena[index].height - blame as u64 - 1;
                        let mut cur_height = lca_height;
                        let mut cur = lca;
                        let mut vote_valid = true;
                        while cur_height > start_height {
                            if self.arena[cur].data.state_valid
                                .expect("state_valid for me has been computed in \
                                wait_and_compute_state_valid_locked by the caller, \
                                so the precedents should have state_valid") {
                                vote_valid = false;
                                break;
                            }
                            cur_height -= 1;
                            cur = self.arena[cur].parent;
                        }
                        // ... and the first unblamed ancestor itself must be
                        // state-valid.
                        if vote_valid
                            && !self.arena[cur].data.state_valid.expect(
                                "state_valid for me has been computed in \
                                 wait_and_compute_state_valid_locked by the caller, \
                                 so the precedents should have state_valid",
                            )
                        {
                            vote_valid = false;
                        }
                        self.arena[index].data.vote_valid_lca_height =
                            lca_height;
                        self.arena[index].data.vote_valid = vote_valid;
                    }
                }
            } else {
                // Stage 1: inherit the validity computed for the unblamed
                // ancestor `a`.
                self.arena[index].data.vote_valid_lca_height = lca_height;
                self.arena[index].data.vote_valid =
                    self.arena[a].data.vote_valid;
            }
        }
        self.arena[me].data.vote_valid
    }
2861
2862 fn total_weight_in_own_epoch(
2865 &self, blockset_in_own_epoch: &Vec<usize>, genesis: usize,
2866 ) -> i128 {
2867 let gen_arena_index = if genesis != NULL {
2868 genesis
2869 } else {
2870 self.cur_era_genesis_block_arena_index
2871 };
2872 let gen_height = self.arena[gen_arena_index].height;
2873 let mut total_weight = 0 as i128;
2874 for index in blockset_in_own_epoch.iter() {
2875 if gen_arena_index != self.cur_era_genesis_block_arena_index {
2876 let height = self.arena[*index].height;
2877 if height < gen_height {
2878 continue;
2879 }
2880 let era_arena_index = self.ancestor_at(*index, gen_height);
2881 if gen_arena_index != era_arena_index {
2882 continue;
2883 }
2884 }
2885 total_weight += self.block_weight(*index);
2886 }
2887 total_weight
2888 }
2889
    /// Rebuild `pivot_chain_metadata` (accumulated past weight and the
    /// `last_pivot_in_past_blocks` sets) from pivot height `start_at`
    /// onwards, and recompute `last_pivot_in_past` for every block listed in
    /// `to_update`.
    fn recompute_metadata(
        &mut self, start_at: u64, mut to_update: HashSet<usize>,
    ) {
        self.pivot_chain_metadata
            .resize_with(self.pivot_chain.len(), Default::default);
        let pivot_height = self.get_pivot_height();
        // Pass 1: refresh metadata for each pivot block at/above `start_at`.
        for i in start_at..pivot_height {
            let me = self.get_pivot_block_arena_index(i);
            self.arena[me].data.last_pivot_in_past = i;
            let i_pivot_index = self.height_to_pivot_index(i);
            self.pivot_chain_metadata[i_pivot_index]
                .last_pivot_in_past_blocks
                .clear();
            self.pivot_chain_metadata[i_pivot_index]
                .last_pivot_in_past_blocks
                .insert(me);
            // past_weight = previous pivot's past weight
            //             + weight of this epoch's block set
            //             + weight of the pivot block itself.
            self.pivot_chain_metadata[i_pivot_index].past_weight =
                if i_pivot_index > 0 {
                    // Temporarily take the block set out, then put it back.
                    let blockset = self
                        .exchange_or_compute_blockset_in_own_view_of_epoch(
                            me, None,
                        );
                    let blockset_weight = self.total_weight_in_own_epoch(
                        &blockset,
                        self.cur_era_genesis_block_arena_index,
                    );
                    self.exchange_or_compute_blockset_in_own_view_of_epoch(
                        me,
                        Some(blockset),
                    );
                    self.pivot_chain_metadata[i_pivot_index - 1].past_weight
                        + blockset_weight
                        + self.block_weight(me)
                } else {
                    self.block_weight(me)
                };
            to_update.remove(&me);
        }
        // Pass 2: for the remaining blocks, recompute `last_pivot_in_past` as
        // the maximum over parent and referees. A two-stage DFS stack makes
        // sure dependencies (parent/referees) are finished first.
        let mut stack = Vec::new();
        let to_visit = to_update.clone();
        for i in &to_update {
            stack.push((0, *i));
        }
        while !stack.is_empty() {
            let (stage, me) = stack.pop().unwrap();
            if !to_visit.contains(&me) {
                continue;
            }
            let parent = self.arena[me].parent;
            if stage == 0 {
                // Stage 0: schedule dependencies, then revisit at stage 1.
                if to_update.contains(&me) {
                    to_update.remove(&me);
                    stack.push((1, me));
                    stack.push((0, parent));
                    for referee in &self.arena[me].referees {
                        stack.push((0, *referee));
                    }
                }
            } else if stage == 1 && me != self.cur_era_genesis_block_arena_index
            {
                let mut last_pivot = if parent == NULL {
                    0
                } else {
                    self.arena[parent].data.last_pivot_in_past
                };
                for referee in &self.arena[me].referees {
                    let x = self.arena[*referee].data.last_pivot_in_past;
                    last_pivot = max(last_pivot, x);
                }
                self.arena[me].data.last_pivot_in_past = last_pivot;
                let last_pivot_index = self.height_to_pivot_index(last_pivot);
                self.pivot_chain_metadata[last_pivot_index]
                    .last_pivot_in_past_blocks
                    .insert(me);
            }
        }
    }
2968
2969 fn get_timer_chain_index(&self, me: usize) -> usize {
2970 if !self.arena[me].is_timer || self.arena[me].data.partial_invalid {
2971 return NULL;
2972 }
2973 if self.arena[me].data.ledger_view_timer_chain_height
2974 < self.cur_era_genesis_timer_chain_height
2975 {
2976 return NULL;
2979 }
2980 let timer_chain_index =
2981 (self.arena[me].data.ledger_view_timer_chain_height
2982 - self.cur_era_genesis_timer_chain_height) as usize;
2983 if self.timer_chain.len() > timer_chain_index
2984 && self.timer_chain[timer_chain_index] == me
2985 {
2986 timer_chain_index
2987 } else {
2988 NULL
2989 }
2990 }
2991
2992 fn compute_timer_chain_past_view_info(
2993 &self, parent: usize, referees: &Vec<usize>,
2994 ) -> (i128, usize) {
2995 let mut timer_longest_difficulty = 0;
2996 let mut longest_referee = parent;
2997 if parent != NULL {
2998 timer_longest_difficulty =
2999 self.arena[parent].data.past_view_timer_longest_difficulty
3000 + self.get_timer_difficulty(parent);
3001 }
3002 for referee in referees {
3003 let timer_difficulty =
3004 self.arena[*referee].data.past_view_timer_longest_difficulty
3005 + self.get_timer_difficulty(*referee);
3006 if longest_referee == NULL
3007 || ConsensusGraphInner::is_heavier(
3008 (timer_difficulty, &self.arena[*referee].hash),
3009 (
3010 timer_longest_difficulty,
3011 &self.arena[longest_referee].hash,
3012 ),
3013 )
3014 {
3015 timer_longest_difficulty = timer_difficulty;
3016 longest_referee = *referee;
3017 }
3018 }
3019 let last_timer_block_arena_index = if longest_referee == NULL
3020 || self.arena[longest_referee].is_timer
3021 && !self.arena[longest_referee].data.partial_invalid
3022 {
3023 longest_referee
3024 } else {
3025 self.arena[longest_referee]
3026 .data
3027 .past_view_last_timer_block_arena_index
3028 };
3029 (timer_longest_difficulty, last_timer_block_arena_index)
3030 }
3031
    /// Compute the timer chain information for a prospective block with
    /// `parent` and `referees`, whose view excludes `anticone_opt`.
    ///
    /// Returns `(fork_at, height_map, extra_lca, new_chain)`:
    /// - `fork_at`: timer chain height where the past view diverges from the
    ///   current timer chain;
    /// - `height_map`: arena index -> recomputed
    ///   `ledger_view_timer_chain_height` for blocks in the fork's future;
    /// - `extra_lca`: values to append to `timer_chain_accumulative_lca`;
    /// - `new_chain`: the timer chain suffix starting at `fork_at`.
    fn compute_timer_chain_tuple(
        &self, parent: usize, referees: &Vec<usize>,
        anticone_opt: Option<&BitSet>,
    ) -> (u64, HashMap<usize, u64>, Vec<usize>, Vec<usize>) {
        let empty_set = BitSet::new();
        let anticone = if let Some(a) = anticone_opt {
            a
        } else {
            &empty_set
        };
        // Collect the past-view timer blocks that are NOT on the current
        // timer chain, then reverse so `tmp_chain` is oldest-first.
        let mut tmp_chain = Vec::new();
        let mut tmp_chain_set = HashSet::new();
        let (_, past_view_last_timer_block_arena_index) =
            self.compute_timer_chain_past_view_info(parent, referees);
        let mut i = past_view_last_timer_block_arena_index;
        while i != NULL && self.get_timer_chain_index(i) == NULL {
            tmp_chain.push(i);
            tmp_chain_set.insert(i);
            i = self.arena[i].data.past_view_last_timer_block_arena_index;
        }
        tmp_chain.reverse();
        // `i` is now the deepest past-view timer block still on the current
        // timer chain; the fork is right above it (or at the era genesis
        // height if there is none).
        let fork_at;
        let fork_at_index;
        if i != NULL {
            fork_at = self.arena[i].data.ledger_view_timer_chain_height + 1;
            assert!(fork_at >= self.cur_era_genesis_timer_chain_height);
            fork_at_index =
                (fork_at - self.cur_era_genesis_timer_chain_height) as usize;
        } else {
            fork_at = self.cur_era_genesis_timer_chain_height;
            fork_at_index = 0;
        }

        // If we fork off the current timer chain, every block in the future
        // of the fork point needs its ledger_view_timer_chain_height
        // recomputed, in topological order.
        let mut res = HashMap::new();
        if fork_at_index < self.timer_chain.len() {
            debug!("New block parent = {} referees = {:?} not extending timer chain (len = {}), fork at timer chain height {}, timer chain index {}", parent, referees, self.timer_chain.len(), fork_at, fork_at_index);
            let start_point = if i == NULL {
                self.cur_era_genesis_block_arena_index
            } else {
                self.timer_chain[fork_at_index - 1]
            };
            let mut start_set = HashSet::new();
            start_set.insert(start_point);
            // Future of the fork point, excluding the new block's anticone.
            let visited: BitSet = get_future(
                start_set,
                |i| self.successor_edges(i as usize),
                |i| anticone.contains(i as u32),
            );
            let visited_in_order: Vec<usize> = topological_sort(
                visited,
                |i| {
                    self.predecessor_edges(i as usize)
                        .into_iter()
                        .map(|i| i as u32)
                        .collect()
                },
                |_| true,
            );
            for x in visited_in_order {
                let x = x as usize;
                let mut timer_chain_height = 0;
                // A block's timer chain height is the max over predecessors,
                // +1 when the predecessor is on the new chain segment or on
                // the surviving prefix of the current timer chain.
                for pred in &self.predecessor_edges(x) {
                    let mut height = if let Some(v) = res.get(pred) {
                        *v
                    } else {
                        self.arena[*pred].data.ledger_view_timer_chain_height
                    };
                    if tmp_chain_set.contains(pred)
                        || self.get_timer_chain_index(*pred) < fork_at_index
                    {
                        height += 1;
                    }
                    if height > timer_chain_height {
                        timer_chain_height = height;
                    }
                }
                res.insert(x, timer_chain_height);
            }
        }

        // Maintain the accumulative LCA values for the extended chain.
        // First: slots whose window overlaps the surviving chain prefix.
        let mut tmp_lca = Vec::new();
        if tmp_chain.len() > self.timer_chain.len() - fork_at_index
            && self.timer_chain.len() - fork_at_index
                < self.inner_conf.timer_chain_beta as usize
        {
            let mut last_lca = match self.timer_chain_accumulative_lca.last() {
                Some(last_lca) => *last_lca,
                None => self.cur_era_genesis_block_arena_index,
            };
            let s = max(
                self.timer_chain.len(),
                self.inner_conf.timer_chain_beta as usize,
            );
            let e =
                min(tmp_chain.len(), self.inner_conf.timer_chain_beta as usize);
            for i in s..(fork_at_index + e) {
                let end = i - self.inner_conf.timer_chain_beta as usize;
                // Windows that would reach before the era genesis default to
                // the era genesis.
                if end < self.inner_conf.timer_chain_beta as usize {
                    tmp_lca.push(self.cur_era_genesis_block_arena_index);
                    continue;
                }
                let mut lca = self.timer_chain[end];
                for j in
                    (end - self.inner_conf.timer_chain_beta as usize + 1)..end
                {
                    if lca == NULL {
                        break;
                    }
                    lca = self.lca(lca, self.timer_chain[j]);
                }
                // The accumulative LCA is monotone: only move it forward.
                if lca != NULL
                    && self.arena[last_lca].height < self.arena[lca].height
                {
                    last_lca = lca;
                }
                tmp_lca.push(last_lca);
            }
        }
        // Second: slots whose window lies (partly) within `tmp_chain`.
        if tmp_chain.len() > self.inner_conf.timer_chain_beta as usize {
            let mut last_lca = if let Some(lca) = tmp_lca.last() {
                *lca
            } else if fork_at_index == 0 {
                self.cur_era_genesis_block_arena_index
            } else {
                self.timer_chain_accumulative_lca[fork_at_index - 1]
            };
            for i in 0..(tmp_chain.len()
                - (self.inner_conf.timer_chain_beta as usize))
            {
                if fork_at_index + i + 1
                    < self.inner_conf.timer_chain_beta as usize
                {
                    tmp_lca.push(self.cur_era_genesis_block_arena_index)
                } else {
                    // LCA over the window: first the tmp_chain part ...
                    let mut lca = tmp_chain[i];
                    let s = if i < self.inner_conf.timer_chain_beta as usize - 1
                    {
                        0
                    } else {
                        i + 1 - self.inner_conf.timer_chain_beta as usize
                    };
                    for j in s..i {
                        if lca == NULL {
                            break;
                        }
                        lca = self.lca(lca, tmp_chain[j]);
                    }
                    // ... then the part still on the current timer chain.
                    for j in (fork_at_index + i + 1
                        - self.inner_conf.timer_chain_beta as usize)
                        ..fork_at_index
                    {
                        if lca == NULL {
                            break;
                        }
                        lca = self.lca(lca, self.timer_chain[j]);
                    }
                    if lca != NULL
                        && self.arena[last_lca].height < self.arena[lca].height
                    {
                        last_lca = lca;
                    }
                    tmp_lca.push(last_lca);
                }
            }
        }

        (fork_at, res, tmp_lca, tmp_chain)
    }
3243
    /// Update the global timer chain (and its accumulative LCA array) after
    /// block `me` is added, using the timer chain tuple computed from `me`'s
    /// parent and referees.
    fn update_timer_chain(&mut self, me: usize) {
        let (fork_at, res, extra_lca, tmp_chain) = self
            .compute_timer_chain_tuple(
                self.arena[me].parent,
                &self.arena[me].referees,
                None,
            );

        // Truncate/extend the timer chain at the fork point and splice in
        // the new suffix.
        let fork_at_index =
            (fork_at - self.cur_era_genesis_timer_chain_height) as usize;
        self.timer_chain.resize(fork_at_index + tmp_chain.len(), 0);
        // The accumulative LCA array always trails the chain by
        // `timer_chain_beta` entries.
        let new_chain_lca_size = if self.timer_chain.len()
            > self.inner_conf.timer_chain_beta as usize
        {
            self.timer_chain.len() - self.inner_conf.timer_chain_beta as usize
        } else {
            0
        };
        self.timer_chain_accumulative_lca
            .resize(new_chain_lca_size, 0);
        for i in 0..tmp_chain.len() {
            self.timer_chain[fork_at_index + i] = tmp_chain[i];
        }
        for i in 0..extra_lca.len() {
            self.timer_chain_accumulative_lca
                [new_chain_lca_size - extra_lca.len() + i] = extra_lca[i];
        }
        // If `me` was not covered by the recomputation map, it extends the
        // chain and its ledger view height is exactly `fork_at`.
        if !res.contains_key(&me) {
            assert!(
                self.cur_era_genesis_timer_chain_height
                    + self.timer_chain.len() as u64
                    == fork_at
            );
            self.arena[me].data.ledger_view_timer_chain_height = fork_at;
        }
        for (k, v) in res {
            self.arena[k].data.ledger_view_timer_chain_height = v;
        }
        // A valid timer block is appended to the chain; then one more
        // accumulative LCA entry may become due.
        if self.arena[me].is_timer && !self.arena[me].data.partial_invalid {
            self.timer_chain.push(me);
            if self.timer_chain.len()
                >= 2 * self.inner_conf.timer_chain_beta as usize
            {
                // LCA over the beta-sized window ending beta entries before
                // the tip.
                let s = self.timer_chain.len()
                    - 2 * self.inner_conf.timer_chain_beta as usize;
                let e = self.timer_chain.len()
                    - self.inner_conf.timer_chain_beta as usize;
                let mut lca = self.timer_chain[e - 1];
                for i in s..(e - 1) {
                    if lca == NULL {
                        break;
                    }
                    lca = self.lca(lca, self.timer_chain[i]);
                }
                let last_lca =
                    if let Some(x) = self.timer_chain_accumulative_lca.last() {
                        *x
                    } else {
                        self.cur_era_genesis_block_arena_index
                    };
                // Keep the accumulative LCA monotone in height.
                if lca != NULL
                    && self.arena[last_lca].height < self.arena[lca].height
                {
                    self.timer_chain_accumulative_lca.push(lca);
                } else {
                    self.timer_chain_accumulative_lca.push(last_lca);
                }
                assert_eq!(
                    self.timer_chain_accumulative_lca.len(),
                    self.timer_chain.len()
                        - self.inner_conf.timer_chain_beta as usize
                );
            } else if self.timer_chain.len()
                > self.inner_conf.timer_chain_beta as usize
            {
                // Chain still too short for a real window: pad with the era
                // genesis.
                self.timer_chain_accumulative_lca
                    .push(self.cur_era_genesis_block_arena_index);
            }
        }
        debug!(
            "Timer chain updated to {:?} accumulated lca {:?}",
            self.timer_chain, self.timer_chain_accumulative_lca
        );
    }
3343
    /// Current block-entrance sequence number, which counts the blocks this
    /// consensus graph has processed so far.
    pub fn total_processed_block_count(&self) -> u64 {
        self.sequence_number_of_block_entrance
    }
3347
3348 pub fn get_trusted_blame_block(
3349 &self, checkpoint_hash: &H256, plus_depth: usize,
3350 ) -> Option<H256> {
3351 let arena_index_opt = self.hash_to_arena_indices.get(checkpoint_hash);
3352 if arena_index_opt.is_none() {
3354 debug!(
3355 "get_trusted_blame_block: block {:?} not in consensus",
3356 checkpoint_hash
3357 );
3358 return None;
3359 }
3360 let arena_index = *arena_index_opt.unwrap();
3361 let pivot_index =
3362 self.height_to_pivot_index(self.arena[arena_index].height);
3363 if pivot_index >= self.pivot_chain.len()
3365 || self.pivot_chain[pivot_index] != arena_index
3366 {
3367 debug!(
3368 "get_trusted_blame_block: block {:?} not on pivot chain",
3369 checkpoint_hash
3370 );
3371 return None;
3372 }
3373 self.find_first_index_with_correct_state_of(
3374 pivot_index + plus_depth,
3375 None, 0, )
3378 .and_then(|index| Some(self.arena[self.pivot_chain[index]].hash))
3379 }
3380
    /// Trusted blame block for `snapshot_epoch_id`, searched with the
    /// snapshot-specific extra depth configured in the data manager.
    pub fn get_trusted_blame_block_for_snapshot(
        &self, snapshot_epoch_id: &EpochId,
    ) -> Option<H256> {
        self.get_trusted_blame_block(
            snapshot_epoch_id,
            self.data_man.get_snapshot_blame_plus_depth(),
        )
    }
3390
3391 pub fn get_to_sync_epoch_id(&self) -> EpochId {
3393 let height_to_sync = self.latest_snapshot_height();
3394 let epoch_to_sync = self.arena
3396 [self.pivot_chain[self.height_to_pivot_index(height_to_sync)]]
3397 .hash;
3398 epoch_to_sync
3399 }
3400
    /// Height of the latest snapshot; currently this is the current era
    /// stable height.
    fn latest_snapshot_height(&self) -> u64 { self.cur_era_stable_height }
3404
3405 fn collect_defer_blocks_missing_execution_commitments(
3406 &self, me: usize,
3407 ) -> Result<Vec<H256>, String> {
3408 let mut cur = self.get_deferred_state_arena_index(me)?;
3409 let mut waiting_blocks = Vec::new();
3410 debug!(
3411 "collect_blocks_missing_execution_commitments: me={}, height={}",
3412 me, self.arena[me].height
3413 );
3414 let state_boundary_height =
3417 self.data_man.state_availability_boundary.read().lower_bound;
3418 loop {
3419 let deferred_block_hash = self.arena[cur].hash;
3420
3421 if self
3422 .data_man
3423 .get_epoch_execution_commitment(&deferred_block_hash)
3424 .is_some()
3425 || self.arena[cur].height <= state_boundary_height
3426 {
3427 break;
3430 }
3431 waiting_blocks.push(deferred_block_hash);
3432 cur = self.arena[cur].parent;
3433 }
3434 waiting_blocks.reverse();
3435 Ok(waiting_blocks)
3436 }
3437
3438 fn compute_state_valid_and_blame_info(
3440 &mut self, me: usize, executor: &ConsensusExecutor,
3441 ) -> Result<(), String> {
3442 let mut blocks_to_compute = Vec::new();
3445 let mut cur = me;
3446 loop {
3451 if self.arena[cur].data.state_valid.is_some() {
3452 break;
3453 }
3454 blocks_to_compute.push(cur);
3460 cur = self.arena[cur].parent;
3461 }
3462 blocks_to_compute.reverse();
3463
3464 for index in blocks_to_compute {
3465 self.compute_state_valid_and_blame_info_for_block(index, executor)?;
3466 }
3467 Ok(())
3468 }
3469
3470 fn split_root(&mut self, me: usize) {
3471 let parent = self.arena[me].parent;
3472 assert!(parent != NULL);
3473 self.weight_tree.split_root(parent, me);
3474 self.adaptive_tree.split_root(parent, me);
3475 self.arena[me].parent = NULL;
3476 }
3477
    /// Clear the epoch number of the pivot block and its whole epoch set by
    /// setting it back to the `NULLU64` sentinel.
    pub fn reset_epoch_number_in_epoch(&mut self, pivot_arena_index: usize) {
        self.set_epoch_number_in_epoch(pivot_arena_index, NULLU64);
    }
3481
3482 fn set_epoch_number_in_epoch(
3483 &mut self, pivot_arena_index: usize, epoch_number: u64,
3484 ) {
3485 assert!(!self.arena[pivot_arena_index].data.blockset_cleared);
3486 let block_set = self.exchange_or_compute_blockset_in_own_view_of_epoch(
3487 pivot_arena_index,
3488 None,
3489 );
3490 for idx in &block_set {
3491 self.arena[*idx].data.epoch_number = epoch_number
3492 }
3493 self.exchange_or_compute_blockset_in_own_view_of_epoch(
3494 pivot_arena_index,
3495 Some(block_set),
3496 );
3497 self.arena[pivot_arena_index].data.epoch_number = epoch_number;
3498 }
3499
3500 fn get_deferred_state_arena_index(
3501 &self, me: usize,
3502 ) -> Result<usize, String> {
3503 let height = self.arena[me].height;
3504 if height <= DEFERRED_STATE_EPOCH_COUNT {
3508 return Ok(self.cur_era_genesis_block_arena_index);
3509 }
3510 if self.cur_era_genesis_height + DEFERRED_STATE_EPOCH_COUNT > height {
3512 return Err(
3513 "Parent is too old for computing the deferred state".to_owned()
3514 );
3515 }
3516 let target_height = height - DEFERRED_STATE_EPOCH_COUNT;
3517 let pivot_idx = self.height_to_pivot_index(height);
3518 if pivot_idx < self.pivot_chain.len()
3520 && self.pivot_chain[pivot_idx] == me
3521 {
3522 return Ok(
3523 self.pivot_chain[self.height_to_pivot_index(target_height)]
3524 );
3525 } else {
3526 return Ok(self.ancestor_at(me, target_height));
3527 }
3528 }
3529
    /// After a restart, repopulate `state_valid` starting from the trusted
    /// blame block of the first available state epoch: each visited block is
    /// marked valid (i == 0) and the `blame` ancestors it blames are marked
    /// invalid, repeating down the parent chain.
    pub fn recover_state_valid(&mut self) {
        let start_pivot_index =
            (self.data_man.state_availability_boundary.read().lower_bound
                - self.cur_era_genesis_height) as usize;
        if start_pivot_index >= self.pivot_chain.len() {
            // The state boundary lies beyond our pivot chain; nothing to do.
            return;
        }
        let start_epoch_hash =
            self.arena[self.pivot_chain[start_pivot_index]].hash;
        let maybe_trusted_blame_block =
            self.get_trusted_blame_block(&start_epoch_hash, 0);
        debug!("recover_state_valid: checkpoint={:?}, maybe_trusted_blame_block={:?}", start_epoch_hash, maybe_trusted_blame_block);

        if let Some(trusted_blame_block) = maybe_trusted_blame_block {
            let mut cur = *self
                .hash_to_arena_indices
                .get(&trusted_blame_block)
                .unwrap();
            while cur != NULL {
                let blame = self
                    .data_man
                    .block_header_by_hash(&self.arena[cur].hash)
                    .unwrap()
                    .blame();
                // i == 0 is the trusted block itself (valid); the following
                // `blame` ancestors are the blamed (invalid) ones. `cur`
                // advances inside this loop.
                for i in 0..blame + 1 {
                    self.arena[cur].data.state_valid = Some(i == 0);
                    trace!(
                        "recover_state_valid: index={} hash={} state_valid={}",
                        cur,
                        self.arena[cur].hash,
                        i == 0
                    );
                    cur = self.arena[cur].parent;
                    if cur == NULL {
                        break;
                    }
                }
            }
        } else {
            // Only the true genesis legitimately has no trusted blame block.
            if start_epoch_hash != self.data_man.true_genesis.hash() {
                error!(
                    "Fail to recover state_valid: start_epoch_hash={:?}",
                    start_epoch_hash
                );
            }
        }
    }
3589
3590 pub fn block_node(&self, block_hash: &H256) -> Option<&ConsensusGraphNode> {
3591 self.hash_to_arena_indices
3592 .get(block_hash)
3593 .and_then(|arena_index| self.arena.get(*arena_index))
3594 }
3595
    /// Compute the set of terminal hashes (at most roughly `ref_bound`) that
    /// a new block on top of `best_index` should reference. Terminals whose
    /// LCA with `best_index` is lowest are merged into their parents /
    /// referrers first, until the bound is met and no legacy (`era_block ==
    /// NULL`) entries remain.
    pub fn best_terminals(
        &mut self, best_index: usize, ref_bound: usize,
    ) -> Vec<H256> {
        let pastset_tmp;
        // Past set of `best_index`: from the cache, or recomputed brutally.
        let pastset = if let Some(s) = self.pastset_cache.get(best_index) {
            s
        } else {
            pastset_tmp = self.compute_pastset_brutal(best_index);
            &pastset_tmp
        };

        // Take the old LCA-height cache out of self; a cached value is only
        // reused when below the recorded reorg height.
        let lca_height_cache = mem::replace(
            &mut self.best_terminals_lca_height_cache,
            Default::default(),
        );

        // Max-heap keyed by negated LCA height: the terminal with the lowest
        // LCA height pops first.
        let mut counter_map = FastHashMap::new();
        let mut queue = BinaryHeap::new();
        for hash in self.terminal_hashes.iter() {
            let a_idx = self.hash_to_arena_indices.get(hash).unwrap();
            let mut a_lca_height = NULLU64;
            if let Some(h) = lca_height_cache.get(a_idx) {
                if *h < self.best_terminals_reorg_height {
                    a_lca_height = *h;
                }
            }
            if a_lca_height == NULLU64 {
                let a_lca = self.lca(*a_idx, best_index);
                a_lca_height = self.arena[a_lca].height;
            }
            self.best_terminals_lca_height_cache
                .insert(*a_idx, a_lca_height);
            queue.push((-(a_lca_height as i128), *a_idx));
        }

        // Pop while over the bound, or while the top entry is a legacy
        // sentinel (keyed with NULLU64). A popped entry is replaced by its
        // parent/referees once ALL of their children/referrers have been
        // popped (tracked in counter_map); blocks already in the past set
        // are dropped.
        while queue.len() > ref_bound
            || queue
                .peek()
                .map_or(false, |(v, _)| *v == -(NULLU64 as i128))
        {
            let (_, idx) = queue.pop().unwrap();
            let parent = self.arena[idx].parent;
            if parent != NULL {
                if let Some(p) = counter_map.get_mut(&parent) {
                    *p = *p + 1;
                } else if !pastset.contains(parent as u32) {
                    counter_map.insert(parent, 1);
                }
                if let Some(p) = counter_map.get(&parent) {
                    if *p
                        == self.arena[parent].children.len()
                            + self.arena[parent].referrers.len()
                    {
                        if self.arena[parent].era_block == NULL {
                            // Legacy block: force it to pop first.
                            queue.push((-(NULLU64 as i128), parent));
                        } else {
                            let mut a_lca_height = NULLU64;
                            if let Some(h) = lca_height_cache.get(&parent) {
                                if *h < self.best_terminals_reorg_height {
                                    a_lca_height = *h;
                                }
                            }
                            if a_lca_height == NULLU64 {
                                let a_lca = self.lca(parent, best_index);
                                a_lca_height = self.arena[a_lca].height;
                            }
                            self.best_terminals_lca_height_cache
                                .insert(parent, a_lca_height);
                            queue.push((-(a_lca_height as i128), parent));
                        }
                    }
                }
            }
            // Same bookkeeping for each referee of the popped block.
            for referee in &self.arena[idx].referees {
                if let Some(p) = counter_map.get_mut(referee) {
                    *p = *p + 1;
                } else if !pastset.contains(*referee as u32) {
                    counter_map.insert(*referee, 1);
                }
                if let Some(p) = counter_map.get(referee) {
                    if *p
                        == self.arena[*referee].children.len()
                            + self.arena[*referee].referrers.len()
                    {
                        if self.arena[*referee].era_block == NULL {
                            queue.push((-(NULLU64 as i128), *referee));
                        } else {
                            let mut a_lca_height = NULLU64;
                            if let Some(h) = lca_height_cache.get(referee) {
                                if *h < self.best_terminals_reorg_height {
                                    a_lca_height = *h;
                                }
                            }
                            if a_lca_height == NULLU64 {
                                let a_lca = self.lca(*referee, best_index);
                                a_lca_height = self.arena[a_lca].height;
                            }
                            self.best_terminals_lca_height_cache
                                .insert(*referee, a_lca_height);
                            queue.push((-(a_lca_height as i128), *referee));
                        }
                    }
                }
            }
        }
        self.best_terminals_reorg_height = NULLU64;
        let bounded_hashes =
            queue.iter().map(|(_, b)| self.arena[*b].hash).collect();
        bounded_hashes
    }
3731
    /// Mark header-only recovery as finished so the graph treats blocks as
    /// full blocks from now on.
    pub fn finish_block_recovery(&mut self) { self.header_only = false; }
3733
3734 pub fn get_pivot_chain_and_weight(
3735 &self, height_range: Option<(u64, u64)>,
3736 ) -> Result<Vec<(H256, U256)>, String> {
3737 let min_height = self.get_cur_era_genesis_height();
3738 let max_height = self.arena[*self.pivot_chain.last().unwrap()].height;
3739 let (start, end) = height_range.unwrap_or((min_height, max_height));
3740 if start < min_height || end > max_height {
3741 bail!(
3742 "height_range out of bound: requested={:?} min={} max={}",
3743 height_range,
3744 min_height,
3745 max_height
3746 );
3747 }
3748
3749 let mut chain = Vec::new();
3750 for i in start..=end {
3751 let pivot_arena_index = self.get_pivot_block_arena_index(i);
3752 chain.push((
3753 self.arena[pivot_arena_index].hash.into(),
3754 (self.weight_tree.get(pivot_arena_index) as u64).into(),
3755 ));
3756 }
3757 Ok(chain)
3758 }
3759
3760 pub fn get_subtree(&self, root_block: &H256) -> Option<Vec<H256>> {
3762 let root_arena_index = *self.hash_to_arena_indices.get(root_block)?;
3763 let mut queue = VecDeque::new();
3764 let mut subtree = Vec::new();
3765 queue.push_back(root_arena_index);
3766 while let Some(i) = queue.pop_front() {
3767 subtree.push(self.arena[i].hash);
3768 for child in &self.arena[i].children {
3769 queue.push_back(*child);
3770 }
3771 }
3772 Some(subtree)
3773 }
3774
3775 pub fn get_next_pivot_decision(
3776 &self, parent_decision_hash: &H256, confirmed_height: u64,
3777 ) -> Option<(u64, H256)> {
3778 let r = match self.hash_to_arena_indices.get(parent_decision_hash) {
3779 None => {
3780 let new_decision_height = (confirmed_height.saturating_sub(
3784 self.inner_conf
3785 .pos_pivot_decision_defer_epoch_count(confirmed_height),
3786 )) / POS_TERM_EPOCHS
3787 * POS_TERM_EPOCHS;
3788 if new_decision_height <= self.cur_era_genesis_height {
3789 None
3790 } else {
3791 let new_decision_arena_index =
3792 self.get_pivot_block_arena_index(new_decision_height);
3793 Some((
3794 self.arena[new_decision_arena_index].height,
3795 self.arena[new_decision_arena_index].hash,
3796 ))
3797 }
3798 }
3799 Some(parent_decision) => {
3800 let parent_decision_height =
3801 self.arena[*parent_decision].height;
3802 assert_eq!(parent_decision_height % POS_TERM_EPOCHS, 0);
3803 if self.get_pivot_block_arena_index(parent_decision_height)
3805 == *parent_decision
3806 {
3807 let new_decision_height =
3810 (confirmed_height.saturating_sub(
3811 self.inner_conf
3812 .pos_pivot_decision_defer_epoch_count(
3813 confirmed_height,
3814 ),
3815 )) / POS_TERM_EPOCHS
3816 * POS_TERM_EPOCHS;
3817 if new_decision_height <= parent_decision_height {
3818 None
3819 } else {
3820 let new_decision_arena_index = self
3821 .get_pivot_block_arena_index(new_decision_height);
3822 Some((
3823 self.arena[new_decision_arena_index].height,
3824 self.arena[new_decision_arena_index].hash,
3825 ))
3826 }
3827 } else {
3828 None
3829 }
3830 }
3831 };
3832 debug!(
3833 "next_pivot_decision: parent={:?} return={:?}",
3834 parent_decision_hash, r
3835 );
3836 r
3837 }
3838
3839 pub fn validate_pivot_decision(
3840 &self, ancestor_hash: &H256, me_hash: &H256,
3841 ) -> bool {
3842 if ancestor_hash == me_hash {
3843 return true;
3844 }
3845 debug!(
3846 "validate_pivot_decision: ancestor={:?}, me={:?}",
3847 ancestor_hash, me_hash
3848 );
3849 match (
3850 self.hash_to_arena_indices.get(ancestor_hash),
3851 self.hash_to_arena_indices.get(me_hash),
3852 ) {
3853 (Some(ancestor), Some(me)) => {
3854 if self.arena[*me].height % POS_TERM_EPOCHS != 0 {
3855 return false;
3856 }
3857 self.ancestor_at(*me, self.arena[*ancestor].height) == *ancestor
3859 }
3860 (_, Some(_me)) => {
3866 if !self.header_only {
3868 warn!(
3869 "ancestor not in consensus graph: processed={}",
3870 self.pivot_block_processed(ancestor_hash)
3871 );
3872 }
3873 true
3876 }
3877 (_, _) => {
3878 if !self.header_only {
3880 warn!("ancestor and me are both not in consensus graph, processed={} {}", self.pivot_block_processed(ancestor_hash), self.pivot_block_processed(me_hash));
3881 }
3882 true
3883 }
3884 }
3885 }
3886
3887 fn get_pos_reference_pivot_decision(
3890 &self, block_hash: &H256,
3891 ) -> Result<H256, String> {
3892 let pos_reference = self
3893 .data_man
3894 .pos_reference_by_hash(block_hash)
3895 .ok_or("header exist".to_string())?
3896 .ok_or("pos reference checked in sync graph".to_string())?;
3897 self.pos_verifier
3898 .get_pivot_decision(&pos_reference)
3899 .ok_or("pos validity checked in sync graph".to_string())
3900 }
3901
3902 fn update_pos_pivot_decision(&mut self, me: usize) {
3903 let h = self.arena[me].hash;
3904 if let Ok(pivot_decision) = self.get_pos_reference_pivot_decision(&h) {
3905 let pivot_decision_height = self
3906 .data_man
3907 .block_height_by_hash(&pivot_decision)
3908 .expect("pos_reference checked");
3909 if pivot_decision_height > self.best_pos_pivot_decision.1 {
3910 self.best_pos_pivot_decision =
3911 (pivot_decision, pivot_decision_height);
3912 }
3913 }
3914 }
3915
    /// Given a candidate `parent_arena_index` and `referee_indices` for a
    /// prospective block, compute that block's anticone and anticone barrier
    /// and delegate to `choose_correct_parent_impl` to pick a parent
    /// consistent with the force-confirmed block.
    pub fn choose_correct_parent(
        &mut self, parent_arena_index: usize, referee_indices: Vec<usize>,
        pos_reference: Option<PosBlockId>,
    ) -> usize {
        let parent_anticone_opt = self.anticone_cache.get(parent_arena_index);
        // Start from (parent's anticone) ∪ (parent's future) ...
        let mut anticone;
        if parent_anticone_opt.is_none() {
            // Cache miss: recompute the parent's anticone from scratch.
            anticone = consensus_new_block_handler::ConsensusNewBlockHandler::compute_anticone_bruteforce(
                self, parent_arena_index,
            );
            for i in self.compute_future_bitset(parent_arena_index) {
                anticone.add(i);
            }
        } else {
            anticone = self.compute_future_bitset(parent_arena_index);
            for index in parent_anticone_opt.unwrap() {
                anticone.add(*index as u32);
            }
        }
        // ... then remove everything reachable from the referees through
        // parent/referee edges (restricted to blocks currently counted in
        // the anticone or legacy blocks), since that is in the new block's
        // past.
        let mut my_past = BitSet::new();
        let mut queue: VecDeque<usize> = VecDeque::new();
        for index in &referee_indices {
            queue.push_back(*index);
        }
        while let Some(index) = queue.pop_front() {
            if my_past.contains(index as u32) {
                continue;
            }
            my_past.add(index as u32);
            let idx_parent = self.arena[index].parent;
            if idx_parent != NULL {
                if anticone.contains(idx_parent as u32)
                    || self.arena[idx_parent].era_block == NULL
                {
                    queue.push_back(idx_parent);
                }
            }
            for referee in &self.arena[index].referees {
                if anticone.contains(*referee as u32)
                    || self.arena[*referee].era_block == NULL
                {
                    queue.push_back(*referee);
                }
            }
        }
        for index in my_past.drain() {
            anticone.remove(index);
        }

        // Anticone barrier: non-legacy anticone blocks whose parent is
        // outside the anticone.
        let mut anticone_barrier = BitSet::new();
        for index in (&anticone).iter() {
            let parent = self.arena[index as usize].parent as u32;
            if self.arena[index as usize].era_block != NULL
                && !anticone.contains(parent)
            {
                anticone_barrier.add(index);
            }
        }

        // Timer chain view of the prospective block, excluding its anticone.
        let timer_chain_tuple = self.compute_timer_chain_tuple(
            parent_arena_index,
            &referee_indices,
            Some(&anticone),
        );

        self.choose_correct_parent_impl(
            parent_arena_index,
            &anticone_barrier,
            &timer_chain_tuple,
            pos_reference,
        )
    }
3991
    /// Pick the parent under the force-confirmed block: keep `parent` if it
    /// already descends from the force-confirm point, otherwise walk the
    /// heaviest-child path down from it. Weights contributed by the new
    /// block's anticone (subtree roots in `anticone_barrier`) are temporarily
    /// subtracted so they do not influence the walk.
    fn choose_correct_parent_impl(
        &mut self, parent: usize, anticone_barrier: &BitSet,
        timer_chain_tuple: &(u64, HashMap<usize, u64>, Vec<usize>, Vec<usize>),
        pos_reference: Option<PosBlockId>,
    ) -> usize {
        let force_confirm =
            self.compute_block_force_confirm(timer_chain_tuple, pos_reference);
        let force_confirm_height = self.arena[force_confirm].height;

        // Fast path: the proposed parent already sits under force_confirm.
        if self.ancestor_at(parent, force_confirm_height) == force_confirm {
            return parent;
        }

        // Record each barrier root's current weight so the subtraction below
        // can be undone exactly.
        let mut weight_delta = HashMap::new();

        for index in anticone_barrier.iter() {
            assert!(!self.is_legacy_block(index as usize));
            weight_delta
                .insert(index as usize, self.weight_tree.get(index as usize));
        }

        // Temporarily remove anticone weight along the tree paths; the walk
        // below must only see weight visible to the new block.
        for (index, delta) in &weight_delta {
            self.weight_tree.path_apply(*index, -*delta);
        }

        // Greedy descent: repeatedly step to the child winning the
        // (weight, hash) comparison until a leaf is reached.
        let mut new_parent = force_confirm;
        while !self.arena[new_parent].children.is_empty() {
            let mut children = self.arena[new_parent].children.clone();
            let mut pivot = children.pop().expect("non-empty");
            for child in children {
                if ConsensusGraphInner::is_heavier(
                    (self.weight_tree.get(child), &self.arena[child].hash),
                    (self.weight_tree.get(pivot), &self.arena[pivot].hash),
                ) {
                    pivot = child;
                }
            }
            new_parent = pivot;
        }

        // Restore the weights subtracted above.
        for (index, delta) in &weight_delta {
            self.weight_tree.path_apply(*index, *delta);
        }

        new_parent
    }
4041
4042 pub fn pivot_block_processed(&self, pivot_hash: &H256) -> bool {
4043 if let Some(height) = self.data_man.block_height_by_hash(pivot_hash) {
4044 if let Ok(epoch_hash) =
4045 self.get_pivot_hash_from_epoch_number(height)
4046 {
4047 if epoch_hash == *pivot_hash {
4048 return true;
4049 } else {
4050 debug!(
4051 "pivot_block_processed: {:?} is not on pivot chain",
4052 pivot_hash
4053 );
4054 }
4055 } else {
4056 debug!("pivot_block_processed: epoch not processed height={:?} pivot={:?}", height, pivot_hash);
4057 }
4058 } else {
4059 debug!("pivot_block_processed: {:?} is not processed", pivot_hash);
4060 }
4061 false
4062 }
4063
4064 pub fn is_confirmed_by_pos(&self, block_hash: &H256) -> bool {
4072 let epoch_number = match self.get_block_epoch_number(block_hash) {
4073 Some(epoch_number) => epoch_number,
4074 None => match self
4075 .data_man
4076 .block_execution_result_by_hash_from_db(block_hash)
4077 {
4078 Some(r) => {
4079 let epoch_hash = r.0;
4080 self.data_man
4081 .block_height_by_hash(&epoch_hash)
4082 .expect("executed header exists")
4083 }
4084 None => return false,
4086 },
4087 };
4088 epoch_number <= self.best_pos_pivot_decision.1
4090 }
4091
    /// Returns the best PoS pivot decision as a `(pivot block hash, height)`
    /// pair.
    pub fn latest_epoch_confirmed_by_pos(&self) -> &(H256, u64) {
        &self.best_pos_pivot_decision
    }
4096}
4097
4098impl pow::ConsensusProvider for &ConsensusGraphInner {
4099 fn num_blocks_in_epoch(&self, h: &H256) -> u64 {
4100 let index = self.hash_to_arena_indices.get(h).unwrap(); let parent = self.arena[*index].parent;
4102 (self.arena[*index].past_num_blocks
4103 - self.arena[parent].past_num_blocks) as u64
4104 }
4105
4106 fn block_header_by_hash(&self, hash: &H256) -> Option<Arc<BlockHeader>> {
4107 self.data_man.block_header_by_hash(hash)
4108 }
4109}
4110
// The generic DAG algorithms from the `dag` crate address consensus blocks
// by their arena index.
impl Graph for ConsensusGraphInner {
    type NodeIndex = usize;
}
4114
4115impl TreeGraph for ConsensusGraphInner {
4116 fn parent(&self, node_index: Self::NodeIndex) -> Option<Self::NodeIndex> {
4117 if self.arena[node_index].parent != NULL {
4118 Some(self.arena[node_index].parent)
4119 } else {
4120 None
4121 }
4122 }
4123
4124 fn referees(&self, node_index: Self::NodeIndex) -> Vec<Self::NodeIndex> {
4125 self.arena[node_index].referees.clone()
4126 }
4127}
4128
4129impl RichTreeGraph for ConsensusGraphInner {
4130 fn children(&self, node_index: Self::NodeIndex) -> Vec<Self::NodeIndex> {
4131 self.arena[node_index].children.clone()
4132 }
4133
4134 fn referrers(&self, node_index: Self::NodeIndex) -> Vec<Self::NodeIndex> {
4135 self.arena[node_index].referrers.clone()
4136 }
4137}
4138
4139impl StateMaintenanceTrait for ConsensusGraphInner {
4140 fn get_pivot_hash_from_epoch_number(
4141 &self, epoch_number: u64,
4142 ) -> Result<EpochId, String> {
4143 ConsensusGraphInner::get_pivot_hash_from_epoch_number(
4144 self,
4145 epoch_number,
4146 )
4147 .map_err(|e| e.to_string())
4148 }
4149
4150 fn get_epoch_execution_commitment_with_db(
4151 &self, block_hash: &EpochId,
4152 ) -> Option<EpochExecutionCommitment> {
4153 self.data_man
4154 .get_epoch_execution_commitment_with_db(block_hash)
4155 }
4156}