/// Index type for slots inside the trie-node slab allocator.
pub type ActualSlabIndex = u32;

/// A vacant slab entry reserved for a trie node about to be written.
pub type VacantEntry<'a, TrieNode> =
    super::slab::VacantEntry<'a, TrieNode, TrieNode>;

/// Trie nodes are stored in `UnsafeCell`s inside the slab so that holders of
/// a shared allocator guard can still obtain mutable access through the
/// unsafe accessors below.
pub type TrieNodeCell<CacheAlgoDataT> =
    UnsafeCell<MemOptimizedTrieNode<CacheAlgoDataT>>;
type Allocator<CacheAlgoDataT> =
    Slab<TrieNodeCell<CacheAlgoDataT>, TrieNodeCell<CacheAlgoDataT>>;
/// Read guard over the allocator (see `get_allocator`, which uses a
/// recursive read).
pub type AllocatorRef<'a, CacheAlgoDataT> =
    RwLockReadGuard<'a, Allocator<CacheAlgoDataT>>;
pub type AllocatorRefRef<'a, CacheAlgoDataT> =
    &'a AllocatorRef<'a, CacheAlgoDataT>;

/// Position type used by the cache algorithm.
pub type RLFUPosT = u32;
/// Cache indices are (mpt id, db key) pairs, so several delta MPTs share one
/// cache instance.
pub type DeltaMptsCacheAlgorithm = LRU<RLFUPosT, (DeltaMptId, DeltaMptDbKey)>;
pub type DeltaMptsCacheAlgoData =
    <DeltaMptsCacheAlgorithm as CacheAlgorithm>::CacheAlgoData;

pub type TrieNodeDeltaMpt = MemOptimizedTrieNode<DeltaMptsCacheAlgoData>;
pub type TrieNodeDeltaMptCell = TrieNodeCell<DeltaMptsCacheAlgoData>;

pub type SlabVacantEntryDeltaMpt<'a> = VacantEntry<'a, TrieNodeDeltaMptCell>;
pub type AllocatorRefRefDeltaMpt<'a> =
    &'a AllocatorRef<'a, DeltaMptsCacheAlgoData>;

pub type DeltaMptsNodeMemoryManager =
    NodeMemoryManager<DeltaMptsCacheAlgoData, DeltaMptsCacheAlgorithm>;
pub type DeltaMptsCacheManager =
    CacheManagerDeltaMpts<DeltaMptsCacheAlgoData, DeltaMptsCacheAlgorithm>;

impl CacheIndexTrait for DeltaMptDbKey {}
/// In-memory storage manager for trie nodes of (possibly many) delta MPTs:
/// a slab allocator holding the nodes plus a cache manager deciding which
/// committed nodes stay resident.
#[derive(MallocSizeOfDerive)]
pub struct NodeMemoryManager<
    CacheAlgoDataT: CacheAlgoDataTrait,
    CacheAlgorithmT: CacheAlgorithm<CacheAlgoData = CacheAlgoDataT>,
> {
    /// Hard cap on allocator capacity; set to cache_size + idle_size in
    /// `new`.
    size_limit: u32,
    /// Number of free slots that `enlarge` tries to keep available.
    idle_size: u32,
    /// Slab holding the trie node cells; a read lock suffices for slot
    /// access because cells are `UnsafeCell`s.
    allocator: RwLock<Allocator<CacheAlgoDataT>>,
    /// Cache algorithm state plus the (mpt id, db key) -> slot map.
    cache: Mutex<CacheManagerDeltaMpts<CacheAlgoDataT, CacheAlgorithmT>>,
    /// Serializes db loads so that concurrent misses for the same node do
    /// not both read the db (see the load path in
    /// `load_unowned_node_cell_internal_unchecked`).
    db_load_lock: Mutex<()>,

    // Statistics counters, reported by `log_usage`.
    db_load_counter: AtomicUsize,
    uncached_leaf_load_times: AtomicUsize,
    uncached_leaf_db_loads: AtomicUsize,
    pub compute_merkle_db_loads: AtomicUsize,
    children_merkle_db_loads: AtomicUsize,
}
68
impl<
        CacheAlgoDataT: CacheAlgoDataTrait,
        CacheAlgorithmT: CacheAlgorithm<CacheAlgoData = CacheAlgoDataT>,
    > NodeMemoryManager<CacheAlgoDataT, CacheAlgorithmT>
{
    // Sizing constants. The exact tuning rationale is not visible here;
    // values appear to be deployment defaults — confirm against callers.
    pub const MAX_CACHED_TRIE_NODES_DISK_HYBRID: u32 = 10_000_000;
    /// The R-LFU counter tracks `R_LFU_FACTOR` times as many entries as the
    /// disk-hybrid node cache holds.
    pub const MAX_CACHED_TRIE_NODES_R_LFU_COUNTER: u32 = (Self::R_LFU_FACTOR
        * Self::MAX_CACHED_TRIE_NODES_DISK_HYBRID as f64)
        as u32;
    pub const MAX_DIRTY_AND_TEMPORARY_TRIE_NODES: u32 = 5_000_000;
    pub const MAX_TRIE_NODES_MEM_ONLY: u32 = 27_600_000;
    pub const R_LFU_FACTOR: f64 = 4.0;
    pub const START_CAPACITY: u32 = 1_000_000;
}
98
99impl<
100 CacheAlgoDataT: CacheAlgoDataTrait,
101 CacheAlgorithmT: CacheAlgorithm<
102 CacheAlgoData = CacheAlgoDataT,
103 CacheIndex = (DeltaMptId, DeltaMptDbKey),
104 >,
105 > NodeMemoryManager<CacheAlgoDataT, CacheAlgorithmT>
106{
107 pub fn new(
108 cache_start_size: u32, cache_size: u32, idle_size: u32,
109 _node_ref_map_vec_size: u32, cache_algorithm: CacheAlgorithmT,
110 ) -> Self {
111 let size_limit = cache_size + idle_size;
112 Self {
113 size_limit,
114 idle_size,
115 allocator: RwLock::new(
116 Slab::with_capacity((cache_start_size + idle_size) as usize)
117 .into(),
118 ),
119 cache: Mutex::new(CacheManagerDeltaMpts {
120 node_ref_map: NodeRefMapDeltaMpts::new(),
121 cache_algorithm,
122 }),
123 db_load_lock: Default::default(),
124 db_load_counter: Default::default(),
125 uncached_leaf_db_loads: Default::default(),
126 uncached_leaf_load_times: Default::default(),
127 compute_merkle_db_loads: Default::default(),
128 children_merkle_db_loads: Default::default(),
129 }
130 }
131
    /// Returns a shared guard over the slab allocator.
    ///
    /// Uses `read_recursive` so a thread that already holds a read guard
    /// can take another without deadlocking against a queued writer.
    pub fn get_allocator(&self) -> AllocatorRef<'_, CacheAlgoDataT> {
        self.allocator.read_recursive()
    }
135
    /// Exposes the cache-manager mutex so callers can lock it themselves
    /// (e.g. to pass into the load helpers).
    pub fn get_cache_manager(
        &self,
    ) -> &Mutex<CacheManagerDeltaMpts<CacheAlgoDataT, CacheAlgorithmT>> {
        &self.cache
    }
141
142 pub fn enlarge(&self) -> Result<()> {
144 let allocator_upgradable_read = self.allocator.upgradable_read();
145 let allocator_capacity = allocator_upgradable_read.capacity();
146 let occupied_size = allocator_upgradable_read.len();
147 let idle = allocator_capacity - occupied_size;
148 let should_idle = self.idle_size as usize;
149 if idle >= should_idle || allocator_capacity == self.size_limit as usize
150 {
151 return Ok(());
152 }
153 let mut add_size = should_idle - idle;
154 if add_size < allocator_capacity {
155 add_size = allocator_capacity;
156 }
157 let max_add_size = self.size_limit as usize - occupied_size;
158 if add_size >= max_add_size {
159 add_size = max_add_size;
160 }
161 let mut allocator_mut =
162 RwLockUpgradableReadGuard::upgrade(allocator_upgradable_read);
163 allocator_mut.reserve_exact(add_size)?;
164 Ok(())
165 }
166
    /// Drops every cached node belonging to `mpt_id`: each entry is removed
    /// from the cache algorithm, its slab slot (if resident) is freed, and
    /// its node_ref_map entry is deleted.
    pub fn delete_mpt_from_cache(&self, mpt_id: DeltaMptId) {
        let cache_mut = &mut *self.cache.lock();
        let cache_infos =
            cache_mut.node_ref_map.get_all_cache_infos_from_mpt(mpt_id);
        for (db_key, cache_info) in cache_infos {
            self.delete_from_cache(
                &mut cache_mut.cache_algorithm,
                &mut cache_mut.node_ref_map,
                (mpt_id, db_key),
                cache_info,
            );
            // delete_from_cache does not remove the map entry itself.
            cache_mut.node_ref_map.delete((mpt_id, db_key));
        }
    }
181
182 pub fn log_uncached_key_access(&self, db_load_count: i32) {
183 if db_load_count != 0 {
184 self.uncached_leaf_db_loads
185 .fetch_add(db_load_count as usize, Ordering::Relaxed);
186 self.uncached_leaf_load_times
187 .fetch_add(1, Ordering::Relaxed);
188 }
189 }
190
    /// Returns the slab cell stored at `cache_slot`.
    ///
    /// # Safety
    /// `cache_slot` must be an occupied slot in `allocator`; the access is
    /// unchecked.
    pub unsafe fn get_in_memory_cell<'a>(
        allocator: AllocatorRefRef<'a, CacheAlgoDataT>, cache_slot: usize,
    ) -> &'a TrieNodeCell<CacheAlgoDataT> {
        allocator.get_unchecked(cache_slot)
    }
196
    /// Returns a mutable reference to the node stored at `cache_slot`.
    ///
    /// # Safety
    /// `cache_slot` must be an occupied slot in `allocator`, and the
    /// returned `&mut` must not alias any other live reference to the same
    /// node (mutation goes through the cell's `UnsafeCell`).
    pub unsafe fn get_in_memory_node_mut<'a>(
        allocator: AllocatorRefRef<'a, CacheAlgoDataT>, cache_slot: usize,
    ) -> &'a mut MemOptimizedTrieNode<CacheAlgoDataT> {
        allocator.get_unchecked(cache_slot).get_as_mut()
    }
202
    /// Loads node `db_key` of `mpt_id` from the database into the slab and
    /// registers it in the node_ref_map.
    ///
    /// Returns the slab cell together with the still-held cache-manager
    /// lock (as a `GuardedValue`) so the caller continues under the same
    /// lock.
    fn load_from_db<'c: 'a, 'a>(
        &self, allocator: AllocatorRefRef<'a, CacheAlgoDataT>,
        cache_manager: &'c Mutex<
            CacheManagerDeltaMpts<CacheAlgoDataT, CacheAlgorithmT>,
        >,
        db: &mut DeltaDbOwnedReadTraitObj, mpt_id: DeltaMptId,
        db_key: DeltaMptDbKey,
    ) -> Result<
        GuardedValue<
            MutexGuard<
                'c,
                CacheManagerDeltaMpts<CacheAlgoDataT, CacheAlgorithmT>,
            >,
            &'a TrieNodeCell<CacheAlgoDataT>,
        >,
    > {
        self.db_load_counter.fetch_add(1, Ordering::Relaxed);
        let tmp: i64 = db_key.try_into().unwrap();
        trace!("load_from_db: {:?} {:?}", db_key, tmp);
        // NOTE(review): the final `.unwrap()` panics if the key is missing
        // from the db; presumably only keys known to exist are requested —
        // confirm against callers.
        let rlp_bytes = db
            .get_mut_with_number_key(
                db_key.try_into().expect("not exceed i64::MAX"),
            )?
            .unwrap();
        let rlp = Rlp::new(rlp_bytes.as_ref());
        let mut trie_node = MemOptimizedTrieNode::decode(&rlp)?;

        let mut cache_manager_locked = cache_manager.lock();
        let trie_cell_ref: &TrieNodeCell<CacheAlgoDataT>;

        let cache_mut = &mut *cache_manager_locked;

        // If the cache remembers only the node's algorithm data (not a
        // resident slot), restore that data onto the freshly decoded node.
        // A resident slot is treated as impossible here — the caller is
        // expected to have checked the cache before reaching this point.
        match cache_mut.node_ref_map.get_cache_info((mpt_id, db_key)) {
            None => {}
            Some(cache_info) => match cache_info.get_cache_info() {
                TrieCacheSlotOrCacheAlgoData::TrieCacheSlot(_cache_slot) => {
                    unreachable!()
                }

                TrieCacheSlotOrCacheAlgoData::CacheAlgoData(
                    cache_algo_data,
                ) => {
                    trie_node.cache_algo_data = *cache_algo_data;
                }
            },
        }
        let slot = allocator.insert(&trie_node)?;
        trie_cell_ref = unsafe { allocator.get_unchecked(slot) };
        let cache_insertion_result = cache_mut
            .node_ref_map
            .insert((mpt_id, db_key), slot as ActualSlabIndex);
        if cache_insertion_result.is_err() {
            // Roll back the slab insertion before propagating the error.
            allocator.remove(slot)?;

            cache_insertion_result?;
        }

        Ok(GuardedValue::new(cache_manager_locked, trie_cell_ref))
    }
267
268 pub fn load_children_merkles_from_db(
269 &self, db: &mut DeltaDbOwnedReadTraitObj, db_key: DeltaMptDbKey,
270 ) -> Result<Option<CompactedChildrenTable<MerkleHash>>> {
271 self.children_merkle_db_loads
272 .fetch_add(1, Ordering::Relaxed);
273 let rlp_bytes = match db.get_mut(format!("cm{}", db_key).as_bytes())? {
275 None => return Ok(None),
276 Some(rlp_bytes) => rlp_bytes,
277 };
278 let rlp = Rlp::new(rlp_bytes.as_ref());
279 let table = CompactedChildrenTable::from(
280 ChildrenTable::<MerkleHash>::decode(&rlp)?,
281 );
282 Ok(Some(table))
283 }
284
285 fn delete_from_cache(
286 &self, cache_algorithm: &mut CacheAlgorithmT,
287 node_ref_map: &mut NodeRefMapDeltaMpts<CacheAlgoDataT>,
288 cache_index: CacheAlgorithmT::CacheIndex,
289 cache_info: CacheableNodeRefDeltaMpt<CacheAlgoDataT>,
290 ) {
291 cache_algorithm
292 .delete(cache_index, &mut NodeCacheUtil::new(self, node_ref_map));
293
294 match cache_info.get_cache_info() {
295 TrieCacheSlotOrCacheAlgoData::TrieCacheSlot(slot) => {
296 self.get_allocator().remove((*slot) as usize).unwrap();
297 }
298 _ => {}
299 }
300 }
301
302 unsafe fn delete_cache_evicted_unchecked(
303 &self,
304 cache_mut: &mut CacheManagerDeltaMpts<CacheAlgoDataT, CacheAlgorithmT>,
305 evicted_cache_index: CacheAlgorithmT::CacheIndex,
306 ) {
307 let cache_info =
309 cache_mut.node_ref_map.delete(evicted_cache_index).unwrap();
310 match cache_info.get_cache_info() {
311 TrieCacheSlotOrCacheAlgoData::TrieCacheSlot(slot) => {
312 self.get_allocator().remove((*slot) as usize).unwrap();
313 }
314 _ => {}
315 }
316 }
317
    /// Evicts a node while preserving its cache-algorithm data: the slab
    /// slot is freed, but the node_ref_map entry is rewritten to carry the
    /// node's `cache_algo_data` instead of a slot, so a later reload can
    /// restore it (see `load_from_db`).
    ///
    /// # Safety
    /// The cache index must be present in `node_ref_map` and currently
    /// refer to an occupied slab slot; the `unwrap`s panic otherwise.
    unsafe fn delete_cache_evicted_keep_cache_algo_data_unchecked(
        &self,
        cache_mut: &mut CacheManagerDeltaMpts<CacheAlgoDataT, CacheAlgorithmT>,
        evicted_keep_cache_algo_data_cache_index: CacheAlgorithmT::CacheIndex,
    ) {
        let slot = *cache_mut
            .node_ref_map
            .get_cache_info(evicted_keep_cache_algo_data_cache_index)
            .unwrap()
            .get_slot()
            .unwrap() as usize;

        // Copy the node's algorithm data out of the slab into the map
        // before freeing the slot.
        cache_mut.node_ref_map.set_cache_info(
            evicted_keep_cache_algo_data_cache_index,
            CacheableNodeRefDeltaMpt::new(
                TrieCacheSlotOrCacheAlgoData::CacheAlgoData(
                    self.get_allocator()
                        .get_unchecked(slot)
                        .get_ref()
                        .cache_algo_data,
                ),
            ),
        );
        self.get_allocator().remove(slot).unwrap();
    }
346
347 pub fn call_cache_algorithm_access(
349 &self,
350 cache_mut: &mut CacheManagerDeltaMpts<CacheAlgoDataT, CacheAlgorithmT>,
351 cache_index: CacheAlgorithmT::CacheIndex,
352 ) {
353 let cache_access_result;
354 {
355 let mut cache_store_util =
356 NodeCacheUtil::new(self, &mut cache_mut.node_ref_map);
357 cache_access_result = cache_mut
358 .cache_algorithm
359 .access(cache_index, &mut cache_store_util);
360 }
361 match cache_access_result {
362 CacheAccessResult::MissReplaced {
363 evicted: evicted_cache_indices,
364 evicted_keep_cache_algo_data:
365 evicted_keep_cache_algo_data_cache_indices,
366 } => unsafe {
367 for evicted_cache_index in evicted_cache_indices {
368 self.delete_cache_evicted_unchecked(
369 cache_mut,
370 evicted_cache_index,
371 );
372 }
373 for evicted_keep_cache_algo_data_cache_index in
374 evicted_keep_cache_algo_data_cache_indices
375 {
376 self.delete_cache_evicted_keep_cache_algo_data_unchecked(
377 cache_mut,
378 evicted_keep_cache_algo_data_cache_index,
379 );
380 }
381 },
382 _ => {}
383 }
384 }
385
    /// Returns a mutable reference to a dirty (in-memory, uncommitted)
    /// node.
    ///
    /// # Safety
    /// `node` must be `Dirty` (`Committed` hits `unreachable!`), its slot
    /// must be occupied in `allocator`, and the returned `&mut` must not
    /// alias other live references to the same node.
    pub unsafe fn dirty_node_as_mut_unchecked<'a>(
        &self, allocator: AllocatorRefRef<'a, CacheAlgoDataT>,
        node: &mut NodeRefDeltaMpt,
    ) -> &'a mut MemOptimizedTrieNode<CacheAlgoDataT> {
        match node {
            NodeRefDeltaMpt::Committed { db_key: _ } => {
                unreachable!();
            }
            NodeRefDeltaMpt::Dirty { ref index } => NodeMemoryManager::<
                CacheAlgoDataT,
                CacheAlgorithmT,
            >::get_in_memory_node_mut(
                &allocator,
                *index as usize,
            ),
        }
    }
407
    /// Resolves a `Committed` node reference to its slab cell, loading it
    /// from the database on a cache miss.
    ///
    /// Locking protocol: check the cache under the cache-manager lock; on
    /// a miss, drop that lock, acquire `db_load_lock`, re-acquire the
    /// cache-manager lock and check again (another thread may have loaded
    /// the node in the meantime) before reading the db. Sets
    /// `*is_loaded_from_db` only when a db read actually happened. The
    /// access is always reported to the cache algorithm before returning.
    ///
    /// # Safety
    /// Any slot found in node_ref_map must be occupied in `allocator`;
    /// `node` must not be `Dirty` (hits `unreachable!`).
    unsafe fn load_unowned_node_cell_internal_unchecked<'c: 'a, 'a>(
        &self, allocator: AllocatorRefRef<'a, CacheAlgoDataT>,
        node: NodeRefDeltaMpt,
        cache_manager: &'c Mutex<
            CacheManagerDeltaMpts<CacheAlgoDataT, CacheAlgorithmT>,
        >,
        db: &mut DeltaDbOwnedReadTraitObj, mpt_id: DeltaMptId,
        is_loaded_from_db: &mut bool,
    ) -> Result<
        GuardedValue<
            Option<
                MutexGuard<
                    'c,
                    CacheManagerDeltaMpts<CacheAlgoDataT, CacheAlgorithmT>,
                >,
            >,
            &'a TrieNodeCell<CacheAlgoDataT>,
        >,
    > {
        match node {
            NodeRefDeltaMpt::Committed { ref db_key } => {
                let mut cache_manager_mut_wrapped = Some(cache_manager.lock());

                // First check: is the node already resident?
                let maybe_cache_slot = cache_manager_mut_wrapped
                    .as_mut()
                    .unwrap()
                    .node_ref_map
                    .get_cache_info((mpt_id, *db_key))
                    .and_then(|x| x.get_slot());

                let trie_node = match maybe_cache_slot {
                    Some(cache_slot) => {
                        NodeMemoryManager::<
                            CacheAlgoDataT,
                            CacheAlgorithmT,
                        >::get_in_memory_cell(
                            &allocator, *cache_slot as usize
                        )
                    }
                    None => {
                        // Miss: release the cache lock before waiting on
                        // the db-load lock to avoid blocking other cache
                        // users.
                        cache_manager_mut_wrapped.take();

                        let _db_load_mutex = self.db_load_lock.lock();
                        // Second check under db_load_lock: another thread
                        // may have loaded this node while we waited.
                        cache_manager_mut_wrapped = Some(cache_manager.lock());
                        let maybe_cache_slot = cache_manager_mut_wrapped
                            .as_mut()
                            .unwrap()
                            .node_ref_map
                            .get_cache_info((mpt_id, *db_key))
                            .and_then(|x| x.get_slot());

                        match maybe_cache_slot {
                            Some(cache_slot) => NodeMemoryManager::<
                                CacheAlgoDataT,
                                CacheAlgorithmT,
                            >::get_in_memory_cell(
                                &allocator,
                                *cache_slot as usize,
                            ),
                            None => {
                                // Still missing: drop the cache lock again
                                // (load_from_db re-takes it) and read the
                                // db.
                                cache_manager_mut_wrapped.take();

                                let (guard, loaded_trie_node) = self
                                    .load_from_db(
                                        allocator,
                                        cache_manager,
                                        db,
                                        mpt_id,
                                        *db_key,
                                    )?
                                    .into();

                                cache_manager_mut_wrapped = Some(guard);

                                *is_loaded_from_db = true;

                                loaded_trie_node
                            }
                        }
                    }
                };

                self.call_cache_algorithm_access(
                    cache_manager_mut_wrapped.as_mut().unwrap(),
                    (mpt_id, *db_key),
                );

                Ok(GuardedValue::new(cache_manager_mut_wrapped, trie_node))
            }
            NodeRefDeltaMpt::Dirty { index: _ } => unreachable!(),
        }
    }
508
    /// Returns a mutable reference to the node in slab slot `slot`.
    ///
    /// # Safety
    /// `slot` must be occupied, and the returned `&mut` must not alias any
    /// other live reference to the same node.
    unsafe fn get_cached_node_mut_unchecked<'a>(
        &self, allocator: AllocatorRefRef<'a, CacheAlgoDataT>,
        slot: ActualSlabIndex,
    ) -> &'a mut MemOptimizedTrieNode<CacheAlgoDataT> {
        NodeMemoryManager::<CacheAlgoDataT, CacheAlgorithmT>::get_in_memory_node_mut(
            &allocator,
            slot as usize,
        )
    }
519
    /// Resolves `node` to its slab cell.
    ///
    /// Dirty nodes are returned directly with no cache-manager lock held
    /// (the guard is `None`); committed nodes go through the unchecked
    /// load path, which may read the db (setting `*is_loaded_from_db`) and
    /// returns the held cache-manager lock.
    pub fn node_cell_with_cache_manager<'c: 'a, 'a>(
        &self, allocator: AllocatorRefRef<'a, CacheAlgoDataT>,
        node: NodeRefDeltaMpt,
        cache_manager: &'c Mutex<
            CacheManagerDeltaMpts<CacheAlgoDataT, CacheAlgorithmT>,
        >,
        db: &mut DeltaDbOwnedReadTraitObj, mpt_id: DeltaMptId,
        is_loaded_from_db: &mut bool,
    ) -> Result<
        GuardedValue<
            Option<
                MutexGuard<
                    'c,
                    CacheManagerDeltaMpts<CacheAlgoDataT, CacheAlgorithmT>,
                >,
            >,
            &'a TrieNodeCell<CacheAlgoDataT>,
        >,
    > {
        match node {
            // SAFETY: the unchecked path requires a Committed node, which
            // this arm guarantees.
            NodeRefDeltaMpt::Committed { db_key: _ } => unsafe {
                self.load_unowned_node_cell_internal_unchecked(
                    allocator,
                    node,
                    cache_manager,
                    db,
                    mpt_id,
                    is_loaded_from_db,
                )
            },
            // SAFETY: dirty nodes live in the slab at `index` — assumes
            // the slot is occupied, as for the other unchecked accessors.
            NodeRefDeltaMpt::Dirty { ref index } => unsafe {
                Ok(GuardedValue::new(None, NodeMemoryManager::<
                    CacheAlgoDataT,
                    CacheAlgorithmT,
                >::get_in_memory_cell(
                    &allocator,
                    *index as usize,
                )))
            },
        }
    }
563
    /// Like `node_cell_with_cache_manager` but unwraps the cell into a
    /// shared `&MemOptimizedTrieNode`, preserving the guard.
    pub fn node_as_ref_with_cache_manager<'c: 'a, 'a>(
        &self, allocator: AllocatorRefRef<'a, CacheAlgoDataT>,
        node: NodeRefDeltaMpt,
        cache_manager: &'c Mutex<
            CacheManagerDeltaMpts<CacheAlgoDataT, CacheAlgorithmT>,
        >,
        db: &mut DeltaDbOwnedReadTraitObj, mpt_id: DeltaMptId,
        is_loaded_from_db: &mut bool,
    ) -> Result<
        GuardedValue<
            Option<
                MutexGuard<
                    'c,
                    CacheManagerDeltaMpts<CacheAlgoDataT, CacheAlgorithmT>,
                >,
            >,
            &'a MemOptimizedTrieNode<CacheAlgoDataT>,
        >,
    > {
        self.node_cell_with_cache_manager(
            allocator,
            node,
            cache_manager,
            db,
            mpt_id,
            is_loaded_from_db,
        )
        .map(|gv| {
            // Re-wrap the same guard around the cell's inner reference.
            let (g, v) = gv.into();
            GuardedValue::new(g, v.get_ref())
        })
    }
598
599 pub fn new_node<'a>(
600 allocator: AllocatorRefRef<'a, CacheAlgoDataT>,
601 ) -> Result<(
602 NodeRefDeltaMpt,
603 VacantEntry<'a, TrieNodeCell<CacheAlgoDataT>>,
604 )> {
605 let vacant_entry = allocator.vacant_entry()?;
606 let node = NodeRefDeltaMpt::Dirty {
607 index: vacant_entry.key() as ActualSlabIndex,
608 };
609 Ok((node, vacant_entry))
610 }
611
    /// Frees the slab slot owned by `node`. For committed nodes the
    /// node_ref_map entry is deleted first; if the node was not resident
    /// (no slot recorded) there is nothing to free.
    ///
    /// NOTE(review): unlike `delete_from_cache`, the cache algorithm is not
    /// notified here — presumably handled by the caller or tolerated by the
    /// algorithm; confirm.
    pub fn free_owned_node(
        &self, node: &mut NodeRefDeltaMpt, mpt_id: DeltaMptId,
    ) {
        let slot = match node {
            NodeRefDeltaMpt::Committed { ref db_key } => {
                let maybe_cache_info =
                    self.cache.lock().node_ref_map.delete((mpt_id, *db_key));
                let maybe_cache_slot = maybe_cache_info
                    .as_ref()
                    .and_then(|cache_info| cache_info.get_slot());

                match maybe_cache_slot {
                    None => return,
                    Some(slot) => *slot,
                }
            }
            NodeRefDeltaMpt::Dirty { ref index } => *index,
        };

        self.get_allocator().remove(slot as usize).unwrap();
    }
638
    /// Logs cache/allocator occupancy and the db-load statistics counters
    /// at debug level.
    pub fn log_usage(&self) {
        self.cache.lock().log_usage();
        let allocator_ref = self.get_allocator();
        debug!(
            "trie node allocator: max allowed size: {}, \
             configured idle_size: {}, size: {}, allocated: {}",
            self.size_limit,
            self.idle_size,
            allocator_ref.capacity(),
            allocator_ref.len()
        );
        debug!(
            "number of nodes loaded from db {}",
            self.db_load_counter.load(Ordering::Relaxed)
        );
        debug!(
            "number of uncached leaf node loads {}",
            self.uncached_leaf_load_times.load(Ordering::Relaxed)
        );
        debug!(
            "number of db loads for uncached leaf nodes {}",
            self.uncached_leaf_db_loads.load(Ordering::Relaxed)
        );
        debug!(
            "number of db loads for merkle computation {}",
            self.compute_merkle_db_loads.load(Ordering::Relaxed)
        );
        debug!(
            "number of db loads for children merkles {}",
            self.children_merkle_db_loads.load(Ordering::Relaxed)
        );
    }
671}
672
/// Adapter through which a cache algorithm reads and writes per-node
/// cache-algorithm data, resolving each cache index via the node_ref_map
/// to either a slab-resident node or stored algorithm data.
struct NodeCacheUtil<
    'a,
    CacheAlgoDataT: CacheAlgoDataTrait,
    CacheAlgorithmT: CacheAlgorithm<CacheAlgoData = CacheAlgoDataT>,
> {
    // Provides access to the slab for resident nodes.
    node_memory_manager: &'a NodeMemoryManager<CacheAlgoDataT, CacheAlgorithmT>,
    // Map from cache index to slot / stored algorithm data.
    node_ref_map: &'a mut NodeRefMapDeltaMpts<CacheAlgoDataT>,
}
681
impl<
        'a,
        CacheAlgoDataT: CacheAlgoDataTrait,
        CacheAlgorithmT: CacheAlgorithm<CacheAlgoData = CacheAlgoDataT>,
    > NodeCacheUtil<'a, CacheAlgoDataT, CacheAlgorithmT>
{
    /// Bundles the memory manager and a mutable node_ref_map borrow for
    /// the duration of one cache-algorithm call.
    fn new(
        node_memory_manager: &'a NodeMemoryManager<
            CacheAlgoDataT,
            CacheAlgorithmT,
        >,
        node_ref_map: &'a mut NodeRefMapDeltaMpts<CacheAlgoDataT>,
    ) -> Self {
        NodeCacheUtil {
            node_memory_manager,
            node_ref_map,
        }
    }
}
701
702impl<
703 'a,
704 CacheAlgoDataT: CacheAlgoDataTrait,
705 CacheAlgorithmT: CacheAlgorithm<
706 CacheAlgoData = CacheAlgoDataT,
707 CacheIndex = (DeltaMptId, DeltaMptDbKey),
708 >,
709 > CacheStoreUtil for NodeCacheUtil<'a, CacheAlgoDataT, CacheAlgorithmT>
710{
711 type CacheAlgoData = CacheAlgoDataT;
712 type ElementIndex = CacheAlgorithmT::CacheIndex;
713
714 fn get(&self, cache_idnex: Self::ElementIndex) -> Self::CacheAlgoData {
715 match self
716 .node_ref_map
717 .get_cache_info(cache_idnex)
718 .unwrap()
719 .get_cache_info()
720 {
721 TrieCacheSlotOrCacheAlgoData::TrieCacheSlot(slot) => {
722 let allocator = self.node_memory_manager.get_allocator();
723 unsafe {
724 self.node_memory_manager
725 .get_cached_node_mut_unchecked(&allocator, *slot)
726 .cache_algo_data
727 }
728 }
729 TrieCacheSlotOrCacheAlgoData::CacheAlgoData(cache_algo_data) => {
730 cache_algo_data.clone()
731 }
732 }
733 }
734
735 fn set(
736 &mut self, cache_index: Self::ElementIndex,
737 algo_data: &Self::CacheAlgoData,
738 ) {
739 match self
740 .node_ref_map
741 .get_cache_info(cache_index)
742 .unwrap()
743 .get_cache_info()
744 {
745 TrieCacheSlotOrCacheAlgoData::TrieCacheSlot(slot) => {
746 let allocator = self.node_memory_manager.get_allocator();
747 unsafe {
748 self.node_memory_manager
749 .get_cached_node_mut_unchecked(&allocator, *slot)
750 .cache_algo_data = *algo_data;
751 }
752 }
753 TrieCacheSlotOrCacheAlgoData::CacheAlgoData(_) => {
754 self.node_ref_map.set_cache_info(
755 cache_index,
756 CacheableNodeRefDeltaMpt::new(
757 TrieCacheSlotOrCacheAlgoData::CacheAlgoData(*algo_data),
758 ),
759 );
760 }
761 }
762 }
763}
764
765use super::{
766 super::{
767 super::{
768 storage_db::delta_db_manager::DeltaDbOwnedReadTraitObj,
769 utils::{guarded_value::*, UnsafeCellExtension},
770 },
771 errors::*,
772 merkle_patricia_trie::children_table::*,
773 },
774 cache::algorithm::{
775 lru::LRU, CacheAccessResult, CacheAlgoDataTrait, CacheAlgorithm,
776 CacheIndexTrait, CacheStoreUtil,
777 },
778 cache_manager_delta_mpts::CacheManagerDeltaMpts,
779 mem_optimized_trie_node::MemOptimizedTrieNode,
780 node_ref_map::*,
781 slab::Slab,
782 NodeRefDeltaMpt,
783};
784use malloc_size_of_derive::MallocSizeOf as MallocSizeOfDerive;
785use parking_lot::{
786 Mutex, MutexGuard, RwLock, RwLockReadGuard, RwLockUpgradableReadGuard,
787};
788use primitives::MerkleHash;
789use rlp::*;
790use std::{
791 cell::UnsafeCell,
792 convert::TryInto,
793 sync::atomic::{AtomicUsize, Ordering},
794};