use super::ConsensusGraph;

use crate::consensus::consensus_inner::{ConsensusGraphInner, StateBlameInfo};

use cfx_parameters::consensus::*;
use cfx_types::{H256, U256};
use primitives::pos::PosBlockId;

use std::{thread::sleep, time::Duration};

impl ConsensusGraph {
    /// Determine whether the next mined block should have adaptive weight or
    /// not
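    ///
    /// # Example (illustrative sketch)
    ///
    /// How a block assembler might consult this check before fixing the new
    /// header. `consensus`, `parent_hash`, `referees`, `difficulty`, and
    /// `pos_ref` are assumed to be supplied by the caller and are not part of
    /// this module.
    ///
    /// ```ignore
    /// let mut inner = consensus.inner.write();
    /// let adaptive = consensus.check_mining_adaptive_block(
    ///     &mut *inner,
    ///     &parent_hash,
    ///     &referees,
    ///     &difficulty,
    ///     pos_ref,
    /// );
    /// ```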
    pub fn check_mining_adaptive_block(
        &self, inner: &mut ConsensusGraphInner, parent_hash: &H256,
        referees: &Vec<H256>, difficulty: &U256,
        pos_reference: Option<PosBlockId>,
    ) -> bool {
        let parent_index =
            *inner.hash_to_arena_indices.get(parent_hash).expect(
                "parent_hash is the pivot chain tip,\
                 so should still exist in ConsensusInner",
            );
        let referee_indices: Vec<_> = referees
            .iter()
            .map(|h| {
                *inner
                    .hash_to_arena_indices
                    .get(h)
                    .expect("Checked by the caller")
            })
            .collect();
        inner.check_mining_adaptive_block(
            parent_index,
            referee_indices,
            *difficulty,
            pos_reference,
        )
    }

    /// Wait for a block to enter the consensus graph and for its execution to
    /// complete. This API is mainly used for testing purposes.
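    ///
    /// # Example (illustrative sketch)
    ///
    /// A test helper might block until a freshly generated block is fully
    /// processed. `consensus` and `block_hash` are assumptions of this
    /// sketch.
    ///
    /// ```ignore
    /// consensus.wait_for_generation(&block_hash);
    /// // The block is now in the consensus graph and `best_info` has been
    /// // refreshed, so later generated blocks can chain on top of it.
    /// ```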
    pub fn wait_for_generation(&self, hash: &H256) {
        while !self
            .inner
            .read_recursive()
            .hash_to_arena_indices
            .contains_key(hash)
        {
            sleep(Duration::from_millis(1));
        }
        let best_state_block =
            self.inner.read_recursive().best_state_block_hash();
        match self.executor.wait_for_result(best_state_block) {
            Ok(_) => (),
            Err(msg) => warn!("wait_for_generation() gets the following error from the ConsensusExecutor: {}", msg)
        }
        // Ensure that `best_info` has been updated when this returns, so if we
        // are calling RPCs to generate many blocks, they will form a
        // strict chain. Note that it's okay to call `update_best_info`
        // multiple times, and we only generate blocks after
        // `ready_for_mining` is true.
        self.update_best_info(true);
        if let Err(e) = self
            .txpool
            .notify_new_best_info(self.best_info.read_recursive().clone())
        {
            error!("wait for generation: notify_new_best_info err={:?}", e);
        }
    }

    /// After considering the latest `pos_reference`, `parent_hash` may no
    /// longer be a valid choice, so this function updates the parent and
    /// referee choices based on the provided `pos_reference`.
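    ///
    /// # Example (illustrative sketch)
    ///
    /// A block assembler could adjust its tentative choices in place.
    /// `consensus`, `parent`, `referees`, `blame_info`, and `pos_ref` are
    /// assumed to come from an earlier packing step.
    ///
    /// ```ignore
    /// consensus.choose_correct_parent(
    ///     &mut parent,
    ///     &mut referees,
    ///     &mut blame_info,
    ///     pos_ref,
    /// );
    /// // `parent`, `referees`, and `blame_info` are now consistent with the
    /// // given PoS reference.
    /// ```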
    pub fn choose_correct_parent(
        &self, parent_hash: &mut H256, referees: &mut Vec<H256>,
        blame_info: &mut StateBlameInfo, pos_reference: Option<PosBlockId>,
    ) {
        let correct_parent_hash = {
            if let Some(pos_ref) = &pos_reference {
                loop {
                    let inner = self.inner.read();
                    let pivot_decision = inner
                        .pos_verifier
                        .get_pivot_decision(pos_ref)
                        .expect("pos ref committed");
                    if inner.hash_to_arena_indices.contains_key(&pivot_decision)
                        || inner.pivot_block_processed(&pivot_decision)
                    {
                        // If this pos ref was processed during catch-up, its
                        // pivot decision may not have been processed yet.
                        break;
                    } else {
                        // Wait without holding consensus inner lock.
                        drop(inner);
                        warn!("Wait for PoW to catch up with PoS");
                        sleep(Duration::from_secs(1));
                    }
                }
            }
            // Recomputing `blame_info` needs to lock `self.inner`, so we
            // limit the lock scope here.
            let mut inner = self.inner.write();
            referees.retain(|h| inner.hash_to_arena_indices.contains_key(h));
            let parent_index =
                *inner.hash_to_arena_indices.get(parent_hash).expect(
                    "parent_hash is the pivot chain tip,\
                     so should still exist in ConsensusInner",
                );
            let referee_indices: Vec<_> = referees
                .iter()
                .map(|h| {
                    *inner
                        .hash_to_arena_indices
                        .get(h)
                        .expect("Checked by the caller")
                })
                .collect();
            let correct_parent = inner.choose_correct_parent(
                parent_index,
                referee_indices,
                pos_reference,
            );
            inner.arena[correct_parent].hash
        };

        if correct_parent_hash != *parent_hash {
            debug!(
                "Change parent from {:?} to {:?}",
                parent_hash, correct_parent_hash
            );

            // correct_parent may be among referees, so check and remove it.
            referees.retain(|i| *i != correct_parent_hash);

            // Old parent is a valid block terminal to refer to.
            if referees.len() < self.config.referee_bound {
                referees.push(*parent_hash);
            }

            // correct_parent may not be on the pivot chain, so recompute
            // blame_info if needed.
            *blame_info = self
                .force_compute_blame_and_deferred_state_for_generation(
                    &correct_parent_hash,
                )
                .expect("blame info computation error");
            *parent_hash = correct_parent_hash;
        }
    }

    /// Force the engine to recompute the deferred state root for a particular
    /// block given a delay.
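    ///
    /// # Example (illustrative sketch)
    ///
    /// `consensus` and `parent_hash` are assumptions of this sketch.
    ///
    /// ```ignore
    /// let blame_info = consensus
    ///     .force_compute_blame_and_deferred_state_for_generation(&parent_hash)
    ///     .expect("state computation should succeed");
    /// ```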
    pub fn force_compute_blame_and_deferred_state_for_generation(
        &self, parent_block_hash: &H256,
    ) -> Result<StateBlameInfo, String> {
        {
            let inner = &mut *self.inner.write();
            let hash = inner
                .get_state_block_with_delay(
                    parent_block_hash,
                    DEFERRED_STATE_EPOCH_COUNT as usize - 1,
                )?
                .clone();
            self.executor.compute_state_for_block(&hash, inner)?;
        }
        self.executor.get_blame_and_deferred_state_for_generation(
            parent_block_hash,
            &self.inner,
        )
    }

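    /// Compute the blame and deferred state fields for a new block whose
    /// parent is `parent_block_hash`, without forcing the deferred state to
    /// be recomputed first (cf.
    /// `force_compute_blame_and_deferred_state_for_generation` above).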
    pub fn get_blame_and_deferred_state_for_generation(
        &self, parent_block_hash: &H256,
    ) -> Result<StateBlameInfo, String> {
        self.executor.get_blame_and_deferred_state_for_generation(
            parent_block_hash,
            &self.inner,
        )
    }
}