//! cfxcore/consensus/consensus_graph/best_info_provider.rs

1use super::ConsensusGraph;
2
3use cfx_types::{AllChainID, H256, U256};
4use malloc_size_of_derive::MallocSizeOf as DeriveMallocSizeOf;
5use metrics::{Gauge, GaugeUsize};
6use std::sync::Arc;
7
// Process-wide metrics gauge registered under the "graph_statistic" group.
// It is refreshed with the latest best epoch number in `update_best_info`.
lazy_static! {
    static ref BEST_EPOCH_NUMBER: Arc<dyn Gauge<usize>> =
        GaugeUsize::register_with_group("graph_statistic", "best_epoch_number");
}
12
/// Snapshot of the consensus graph's current best-chain view. A copy is
/// stored (behind an `Arc`) outside the consensus inner lock so frequent
/// external queries do not need to acquire that lock (see
/// `ConsensusGraph::update_best_info`).
#[derive(Default, Debug, DeriveMallocSizeOf)]
pub struct BestInformation {
    /// Chain id resolved for `best_epoch_number` when the snapshot was taken.
    pub chain_id: AllChainID,
    /// Hash of the current best block.
    pub best_block_hash: H256,
    /// Epoch number of the current best block.
    pub best_epoch_number: u64,
    /// Current difficulty, copied from the consensus inner state.
    pub current_difficulty: U256,
    /// Terminal block hashes bounded by `referee_bound`. When the node is
    /// not ready for mining this contains only the best block hash.
    pub bounded_terminal_block_hashes: Vec<H256>,
    /// Block number of the current best block.
    pub best_block_number: u64,
}
22
23impl BestInformation {
24    pub fn best_chain_id(&self) -> AllChainID { self.chain_id }
25}
26
27impl ConsensusGraph {
28    pub fn best_info(&self) -> Arc<BestInformation> {
29        self.best_info.read_recursive().clone()
30    }
31
32    pub fn best_epoch_number(&self) -> u64 {
33        self.best_info.read_recursive().best_epoch_number
34    }
35
36    pub fn latest_checkpoint_epoch_number(&self) -> u64 {
37        self.data_man
38            .block_height_by_hash(
39                &self.data_man.get_cur_consensus_era_genesis_hash(),
40            )
41            .expect("header for cur_era_genesis should exist")
42    }
43
44    pub fn latest_confirmed_epoch_number(&self) -> u64 {
45        self.confirmation_meter.get_confirmed_epoch_num()
46    }
47
48    pub fn latest_finalized_epoch_number(&self) -> u64 {
49        self.inner
50            .read_recursive()
51            .latest_epoch_confirmed_by_pos()
52            .1
53    }
54
55    pub fn best_chain_id(&self) -> AllChainID {
56        self.best_info.read_recursive().best_chain_id()
57    }
58
59    pub fn best_block_hash(&self) -> H256 {
60        self.best_info.read_recursive().best_block_hash
61    }
62
63    /// This function is called after a new block appended to the
64    /// ConsensusGraph. Because BestInformation is often queried outside. We
65    /// store a version of best_info outside the inner to prevent keep
66    /// getting inner locks.
67    /// If `ready_for_mining` is `false`, the terminal information will not be
68    /// needed, so we do not compute bounded terminals in this case.
69    pub(super) fn update_best_info(&self, ready_for_mining: bool) {
70        let mut inner = self.inner.write();
71        let mut best_info = self.best_info.write();
72
73        let bounded_terminal_block_hashes = if ready_for_mining {
74            inner.bounded_terminal_block_hashes(self.config.referee_bound)
75        } else {
76            // `bounded_terminal` is only needed for mining and serve syncing.
77            // As the computation cost is high, we do not compute it when we are
78            // catching up because we cannot mine blocks in
79            // catching-up phases. Use `best_block_hash` to
80            // represent terminals here to remain consistent.
81            vec![inner.best_block_hash()]
82        };
83        let best_epoch_number = inner.best_epoch_number();
84        BEST_EPOCH_NUMBER.update(best_epoch_number as usize);
85        *best_info = Arc::new(BestInformation {
86            chain_id: self
87                .config
88                .chain_id
89                .read()
90                .get_chain_id(best_epoch_number),
91            best_block_hash: inner.best_block_hash(),
92            best_block_number: inner.best_block_number(),
93            best_epoch_number,
94            current_difficulty: inner.current_difficulty,
95            bounded_terminal_block_hashes,
96        });
97        debug!("update_best_info to {:?}", best_info);
98    }
99}