use futures::{select, stream::FuturesUnordered, FutureExt, StreamExt};
use std::{collections::BTreeMap, future::Future, marker::PhantomData, sync::Arc, time::Duration};
use tokio::{
    sync::{mpsc, oneshot, Semaphore},
    time::sleep,
};
use tracing::{instrument, Instrument};

use sc_client_api::backend::{Backend, StateBackend, StorageProvider};
use sc_utils::mpsc::TracingUnboundedSender;
use sp_api::{ApiExt, Core, ProvideRuntimeApi};
use sp_block_builder::BlockBuilder;
use sp_blockchain::{
    Backend as BlockchainBackend, Error as BlockChainError, HeaderBackend, HeaderMetadata,
};
use sp_runtime::traits::{BlakeTwo256, Block as BlockT, Header as HeaderT};
use substrate_prometheus_endpoint::{
    register, Counter, PrometheusError, Registry as PrometheusRegistry, U64,
};

use ethereum_types::H256;
use fc_storage::StorageOverride;
use fp_rpc::EthereumRuntimeRPCApi;

use moonbeam_client_evm_tracing::{
    formatters::ResponseFormatter,
    types::block::{self, TransactionTrace},
};
pub use moonbeam_rpc_core_trace::{FilterRequest, TraceServer};
use moonbeam_rpc_core_types::{RequestBlockId, RequestBlockTag};
use moonbeam_rpc_primitives_debug::DebugRuntimeApi;

type TxsTraceRes = Result<Vec<TransactionTrace>, String>;

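/// RPC handler for `trace_filter` requests. Talks to the cache task through a
/// `CacheRequester`.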
pub struct Trace<B, C> {
    _phantom: PhantomData<B>,
    client: Arc<C>,
    requester: CacheRequester,
    max_count: u32,
}

impl<B, C> Clone for Trace<B, C> {
    fn clone(&self) -> Self {
        Self {
            _phantom: PhantomData,
            client: Arc::clone(&self.client),
            requester: self.requester.clone(),
            max_count: self.max_count,
        }
    }
}

impl<B, C> Trace<B, C>
where
    B: BlockT<Hash = H256> + Send + Sync + 'static,
    B::Header: HeaderT<Number = u32>,
    C: HeaderMetadata<B, Error = BlockChainError> + HeaderBackend<B>,
    C: Send + Sync + 'static,
{
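    /// Create a new RPC handler.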
    pub fn new(client: Arc<C>, requester: CacheRequester, max_count: u32) -> Self {
        Self {
            client,
            requester,
            max_count,
            _phantom: PhantomData,
        }
    }

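    /// Resolve an optional block ID (number or tag) into a block number.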
    fn block_id(&self, id: Option<RequestBlockId>) -> Result<u32, &'static str> {
        match id {
            Some(RequestBlockId::Number(n)) => Ok(n),
            None | Some(RequestBlockId::Tag(RequestBlockTag::Latest)) => {
                Ok(self.client.info().best_number)
            }
            Some(RequestBlockId::Tag(RequestBlockTag::Earliest)) => Ok(0),
            Some(RequestBlockId::Tag(RequestBlockTag::Finalized)) => {
                Ok(self.client.info().finalized_number)
            }
            Some(RequestBlockId::Tag(RequestBlockTag::Pending)) => {
                Err("'pending' is not supported")
            }
            Some(RequestBlockId::Hash(_)) => Err("Block hash not supported"),
        }
    }

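    /// Handle a `trace_filter` request: resolve the block range, register the
    /// blocks as a batch in the cache task, then collect and filter their traces.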
    async fn filter(self, req: FilterRequest) -> TxsTraceRes {
        let from_block = self.block_id(req.from_block)?;
        let to_block = self.block_id(req.to_block)?;
        let block_heights = from_block..=to_block;

        let count = req.count.unwrap_or(self.max_count);
        if count > self.max_count {
            return Err(format!(
                "count ({}) can't be greater than maximum ({})",
                count, self.max_count
            ));
        }

        let mut block_hashes = vec![];
        for block_height in block_heights {
            if block_height == 0 {
                continue;
            }

            let block_hash = self
                .client
                .hash(block_height)
                .map_err(|e| {
                    format!(
                        "Error when fetching block {} header : {:?}",
                        block_height, e
                    )
                })?
                .ok_or_else(|| format!("Block with height {} doesn't exist", block_height))?;

            block_hashes.push(block_hash);
        }

        let batch_id = self.requester.start_batch(block_hashes.clone()).await?;
        let res = self.fetch_traces(req, &block_hashes, count as usize).await;
        self.requester.stop_batch(batch_id).await;

        res
    }

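    /// Fetch the traces of the batched blocks and apply the address, `after` and
    /// `count` filters of the request.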
    async fn fetch_traces(
        &self,
        req: FilterRequest,
        block_hashes: &[H256],
        count: usize,
    ) -> TxsTraceRes {
        let from_address = req.from_address.unwrap_or_default();
        let to_address = req.to_address.unwrap_or_default();

        let mut traces_amount: i64 = -(req.after.unwrap_or(0) as i64);
        let mut traces = vec![];

        for &block_hash in block_hashes {
            let block_traces = self.requester.get_traces(block_hash).await?;

            let mut block_traces: Vec<_> = block_traces
                .iter()
                .filter(|trace| match trace.action {
                    block::TransactionTraceAction::Call { from, to, .. } => {
                        (from_address.is_empty() || from_address.contains(&from))
                            && (to_address.is_empty() || to_address.contains(&to))
                    }
                    block::TransactionTraceAction::Create { from, .. } => {
                        (from_address.is_empty() || from_address.contains(&from))
                            && to_address.is_empty()
                    }
                    block::TransactionTraceAction::Suicide { address, .. } => {
                        (from_address.is_empty() || from_address.contains(&address))
                            && to_address.is_empty()
                    }
                })
                .cloned()
                .collect();

            traces_amount += block_traces.len() as i64;
            if traces_amount > 0 {
                let traces_amount = traces_amount as usize;
                if traces_amount < block_traces.len() {
                    let skip = block_traces.len() - traces_amount;
                    block_traces = block_traces.into_iter().skip(skip).collect();
                }

                traces.append(&mut block_traces);

                if traces_amount >= count {
                    if req.count.is_none() {
                        return Err(format!(
                            "the amount of traces goes over the maximum ({}), please use 'after' \
                            and 'count' in your request",
                            self.max_count
                        ));
                    }

                    traces = traces.into_iter().take(count).collect();
                    break;
                }
            }
        }

        Ok(traces)
    }
}

#[jsonrpsee::core::async_trait]
impl<B, C> TraceServer for Trace<B, C>
where
    B: BlockT<Hash = H256> + Send + Sync + 'static,
    B::Header: HeaderT<Number = u32>,
    C: HeaderMetadata<B, Error = BlockChainError> + HeaderBackend<B>,
    C: Send + Sync + 'static,
{
    async fn filter(
        &self,
        filter: FilterRequest,
    ) -> jsonrpsee::core::RpcResult<Vec<TransactionTrace>> {
        self.clone()
            .filter(filter)
            .await
            .map_err(fc_rpc::internal_err)
    }
}

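/// Identifier of a batch of blocks registered in the cache task.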
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub struct CacheBatchId(u64);

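/// Requests the RPC handler can send to the cache task.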
enum CacheRequest {
    StartBatch {
        sender: oneshot::Sender<CacheBatchId>,
        blocks: Vec<H256>,
    },
    GetTraces {
        sender: oneshot::Sender<TxsTraceRes>,
        block: H256,
    },
    StopBatch { batch_id: CacheBatchId },
}

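/// Channel-based handle used to send `CacheRequest`s to the cache task.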
#[derive(Clone)]
pub struct CacheRequester(TracingUnboundedSender<CacheRequest>);

impl CacheRequester {
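    /// Request to start caching the provided block hashes. Returns the ID of the
    /// new batch.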
    #[instrument(skip(self))]
    pub async fn start_batch(&self, blocks: Vec<H256>) -> Result<CacheBatchId, String> {
        let (response_tx, response_rx) = oneshot::channel();
        let sender = self.0.clone();

        sender
            .unbounded_send(CacheRequest::StartBatch {
                sender: response_tx,
                blocks,
            })
            .map_err(|e| {
                format!(
                    "Failed to send request to the trace cache task. Error : {:?}",
                    e
                )
            })?;

        response_rx.await.map_err(|e| {
            format!(
                "Trace cache task closed the response channel. Error : {:?}",
                e
            )
        })
    }

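    /// Fetch the traces of a block. If the block is still being traced, the
    /// response is delayed until tracing finishes.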
    #[instrument(skip(self))]
    pub async fn get_traces(&self, block: H256) -> TxsTraceRes {
        let (response_tx, response_rx) = oneshot::channel();
        let sender = self.0.clone();

        sender
            .unbounded_send(CacheRequest::GetTraces {
                sender: response_tx,
                block,
            })
            .map_err(|e| {
                format!(
                    "Failed to send request to the trace cache task. Error : {:?}",
                    e
                )
            })?;

        response_rx
            .await
            .map_err(|e| {
                format!(
                    "Trace cache task closed the response channel. Error : {:?}",
                    e
                )
            })?
            .map_err(|e| format!("Failed to replay block. Error : {:?}", e))
    }

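    /// Notify the cache task that a batch is no longer needed. Cached blocks are
    /// kept alive for the cache duration (and as long as other batches use them)
    /// before being removed.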
    #[instrument(skip(self))]
    pub async fn stop_batch(&self, batch_id: CacheBatchId) {
        let sender = self.0.clone();

        let _ = sender
            .unbounded_send(CacheRequest::StopBatch { batch_id })
            .map_err(|e| {
                format!(
                    "Failed to send request to the trace cache task. Error : {:?}",
                    e
                )
            });
    }
}

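/// A cached block: its tracing state and the number of batches currently using it.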
struct CacheBlock {
    active_batch_count: usize,
    state: CacheBlockState,
}

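/// State of a cached block: either queued/being traced, or traced with the
/// result stored in memory.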
enum CacheBlockState {
    Pooled {
        started: bool,
        waiting_requests: Vec<oneshot::Sender<TxsTraceRes>>,
        #[allow(dead_code)]
        unqueue_sender: oneshot::Sender<()>,
    },
    Cached { traces: TxsTraceRes },
}

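/// Messages sent by the spawned tracing tasks back to the cache task.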
enum BlockingTaskMessage {
    Started { block_hash: H256 },
    Finished {
        block_hash: H256,
        result: TxsTraceRes,
    },
}

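/// Task in charge of tracing blocks on demand and keeping the results in memory
/// until the batches referencing them expire.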
pub struct CacheTask<B, C, BE> {
    client: Arc<C>,
    backend: Arc<BE>,
    blocking_permits: Arc<Semaphore>,
    cached_blocks: BTreeMap<H256, CacheBlock>,
    batches: BTreeMap<u64, Vec<H256>>,
    next_batch_id: u64,
    metrics: Option<Metrics>,
    _phantom: PhantomData<B>,
}

impl<B, C, BE> CacheTask<B, C, BE>
where
    BE: Backend<B> + 'static,
    BE::State: StateBackend<BlakeTwo256>,
    C: ProvideRuntimeApi<B>,
    C: StorageProvider<B, BE>,
    C: HeaderMetadata<B, Error = BlockChainError> + HeaderBackend<B>,
    C: Send + Sync + 'static,
    B: BlockT<Hash = H256> + Send + Sync + 'static,
    B::Header: HeaderT<Number = u32>,
    C::Api: BlockBuilder<B>,
    C::Api: DebugRuntimeApi<B>,
    C::Api: EthereumRuntimeRPCApi<B>,
    C::Api: ApiExt<B>,
{
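    /// Create a new cache task. Returns the task itself (a future to spawn) and a
    /// `CacheRequester` to communicate with it.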
    pub fn create(
        client: Arc<C>,
        backend: Arc<BE>,
        cache_duration: Duration,
        blocking_permits: Arc<Semaphore>,
        overrides: Arc<dyn StorageOverride<B>>,
        prometheus: Option<PrometheusRegistry>,
    ) -> (impl Future<Output = ()>, CacheRequester) {
        let (requester_tx, mut requester_rx) =
            sc_utils::mpsc::tracing_unbounded("trace-filter-cache", 100_000);

        let task = async move {
            let mut batch_expirations = FuturesUnordered::new();
            let (blocking_tx, mut blocking_rx) =
                mpsc::channel(blocking_permits.available_permits() * 2);
            let metrics = if let Some(registry) = prometheus {
                match Metrics::register(&registry) {
                    Ok(metrics) => Some(metrics),
                    Err(err) => {
                        log::error!(target: "tracing", "Failed to register metrics {err:?}");
                        None
                    }
                }
            } else {
                None
            };
            let mut inner = Self {
                client,
                backend,
                blocking_permits,
                cached_blocks: BTreeMap::new(),
                batches: BTreeMap::new(),
                next_batch_id: 0,
                metrics,
                _phantom: Default::default(),
            };

            loop {
                select! {
                    request = requester_rx.next() => {
                        match request {
                            None => break,
                            Some(CacheRequest::StartBatch { sender, blocks }) =>
                                inner.request_start_batch(&blocking_tx, sender, blocks, overrides.clone()),
                            Some(CacheRequest::GetTraces { sender, block }) =>
                                inner.request_get_traces(sender, block),
                            Some(CacheRequest::StopBatch { batch_id }) => {
                                batch_expirations.push(async move {
                                    sleep(cache_duration).await;
                                    batch_id
                                });

                                inner.request_stop_batch(batch_id);
                            },
                        }
                    },
                    message = blocking_rx.recv().fuse() => {
                        match message {
                            None => (),
                            Some(BlockingTaskMessage::Started { block_hash }) =>
                                inner.blocking_started(block_hash),
                            Some(BlockingTaskMessage::Finished { block_hash, result }) =>
                                inner.blocking_finished(block_hash, result),
                        }
                    },
                    batch_id = batch_expirations.next() => {
                        match batch_id {
                            None => (),
                            Some(batch_id) => inner.expired_batch(batch_id),
                        }
                    }
                }
            }
        }
        .instrument(tracing::debug_span!("trace_filter_cache"));

        (task, CacheRequester(requester_tx))
    }

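    /// Handle a batch start request: register the batch and spawn a tracing task
    /// for each block that is not already cached.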
    #[instrument(skip(self, blocking_tx, sender, blocks, overrides))]
    fn request_start_batch(
        &mut self,
        blocking_tx: &mpsc::Sender<BlockingTaskMessage>,
        sender: oneshot::Sender<CacheBatchId>,
        blocks: Vec<H256>,
        overrides: Arc<dyn StorageOverride<B>>,
    ) {
        tracing::trace!("Starting batch {}", self.next_batch_id);
        self.batches.insert(self.next_batch_id, blocks.clone());

        for block in blocks {
            if let Some(block_cache) = self.cached_blocks.get_mut(&block) {
                block_cache.active_batch_count += 1;
                tracing::trace!(
                    "Cache hit for block {}, now used by {} batches.",
                    block,
                    block_cache.active_batch_count
                );
            } else {
                tracing::trace!("Cache miss for block {}, pooling it for tracing.", block);

                let blocking_permits = Arc::clone(&self.blocking_permits);
                let (unqueue_sender, unqueue_receiver) = oneshot::channel();
                let client = Arc::clone(&self.client);
                let backend = Arc::clone(&self.backend);
                let blocking_tx = blocking_tx.clone();
                let overrides = overrides.clone();

                tokio::spawn(
                    async move {
                        tracing::trace!("Waiting for blocking permit or task cancellation");
                        let _permit = select!(
                            _ = unqueue_receiver.fuse() => {
                                tracing::trace!("Tracing of the block has been cancelled.");
                                return;
                            },
                            permit = blocking_permits.acquire().fuse() => permit,
                        );

                        let _ = blocking_tx
                            .send(BlockingTaskMessage::Started { block_hash: block })
                            .await;

                        tracing::trace!("Start block tracing in a blocking task.");

                        let result = async {
                            tokio::task::spawn_blocking(move || {
                                Self::cache_block(client, backend, block, overrides.clone())
                            })
                            .await
                            .map_err(|e| {
                                format!("Tracing Substrate block {} panicked : {:?}", block, e)
                            })?
                        }
                        .await
                        .map_err(|e| e.to_string());

                        tracing::trace!("Block tracing finished, sending result to main task.");

                        let _ = blocking_tx
                            .send(BlockingTaskMessage::Finished {
                                block_hash: block,
                                result,
                            })
                            .await;
                    }
                    .instrument(tracing::trace_span!("Block tracing", block = %block)),
                );

                self.cached_blocks.insert(
                    block,
                    CacheBlock {
                        active_batch_count: 1,
                        state: CacheBlockState::Pooled {
                            started: false,
                            waiting_requests: vec![],
                            unqueue_sender,
                        },
                    },
                );
            }
        }

        let _ = sender.send(CacheBatchId(self.next_batch_id));

        self.next_batch_id = self.next_batch_id.overflowing_add(1).0;
    }

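    /// Handle a request for the traces of a block: answer immediately if the block
    /// is cached, otherwise queue the request until tracing finishes.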
    #[instrument(skip(self))]
    fn request_get_traces(&mut self, sender: oneshot::Sender<TxsTraceRes>, block: H256) {
        if let Some(block_cache) = self.cached_blocks.get_mut(&block) {
            match &mut block_cache.state {
                CacheBlockState::Pooled {
                    ref mut waiting_requests,
                    ..
                } => {
                    tracing::warn!(
                        "A request asked for a pooled block ({}), adding it to the list of \
                        waiting requests.",
                        block
                    );
                    waiting_requests.push(sender);
                    if let Some(metrics) = &self.metrics {
                        metrics.tracing_cache_misses.inc();
                    }
                }
                CacheBlockState::Cached { traces, .. } => {
                    tracing::warn!(
                        "A request asked for a cached block ({}), sending the traces directly.",
                        block
                    );
                    let _ = sender.send(traces.clone());
                    if let Some(metrics) = &self.metrics {
                        metrics.tracing_cache_hits.inc();
                    }
                }
            }
        } else {
            tracing::warn!(
                "An RPC request asked to get a block ({}) which was not batched.",
                block
            );
            let _ = sender.send(Err(format!(
                "RPC request asked for a block ({}) that was not batched",
                block
            )));
        }
    }

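    /// Handle a batch stop request: drop pooled blocks whose tracing has not
    /// started and that are only used by this batch. Other blocks are kept until
    /// the batch expires.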
    #[instrument(skip(self))]
    fn request_stop_batch(&mut self, batch_id: CacheBatchId) {
        tracing::trace!("Stopping batch {}", batch_id.0);
        if let Some(blocks) = self.batches.get(&batch_id.0) {
            for block in blocks {
                let mut remove = false;

                if let Some(block_cache) = self.cached_blocks.get_mut(block) {
                    if block_cache.active_batch_count == 1
                        && matches!(
                            block_cache.state,
                            CacheBlockState::Pooled { started: false, .. }
                        )
                    {
                        remove = true;
                    }
                }

                if remove {
                    tracing::trace!("Pooled block {} is no longer requested.", block);
                    let _ = self.cached_blocks.remove(block);
                }
            }
        }
    }

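    /// Mark a pooled block as being actively traced, so that stopping its batch no
    /// longer cancels it.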
    #[instrument(skip(self))]
    fn blocking_started(&mut self, block_hash: H256) {
        if let Some(block_cache) = self.cached_blocks.get_mut(&block_hash) {
            if let CacheBlockState::Pooled {
                ref mut started, ..
            } = block_cache.state
            {
                *started = true;
            }
        }
    }

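    /// Store the tracing result of a block and respond to all requests waiting on it.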
    #[instrument(skip(self, result))]
    fn blocking_finished(&mut self, block_hash: H256, result: TxsTraceRes) {
        if let Some(block_cache) = self.cached_blocks.get_mut(&block_hash) {
            if let CacheBlockState::Pooled {
                ref mut waiting_requests,
                ..
            } = block_cache.state
            {
                tracing::trace!(
                    "A new block ({}) has been traced, adding it to the cache and responding to \
                    {} waiting requests.",
                    block_hash,
                    waiting_requests.len()
                );
                while let Some(channel) = waiting_requests.pop() {
                    let _ = channel.send(result.clone());
                }

                block_cache.state = CacheBlockState::Cached { traces: result };
            }
        }
    }

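    /// Remove an expired batch and drop any cached block no longer referenced by a
    /// batch.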
    #[instrument(skip(self))]
    fn expired_batch(&mut self, batch_id: CacheBatchId) {
        if let Some(batch) = self.batches.remove(&batch_id.0) {
            for block in batch {
                let mut remove = false;
                if let Some(block_cache) = self.cached_blocks.get_mut(&block) {
                    block_cache.active_batch_count -= 1;

                    if block_cache.active_batch_count == 0 {
                        remove = true;
                    }
                }

                if remove {
                    let _ = self.cached_blocks.remove(&block);
                }
            }
        }
    }

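    /// (Blocking task) Replay a block with the runtime API and format its
    /// `trace_filter` traces.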
    #[instrument(skip(client, backend, overrides))]
    fn cache_block(
        client: Arc<C>,
        backend: Arc<BE>,
        substrate_hash: H256,
        overrides: Arc<dyn StorageOverride<B>>,
    ) -> TxsTraceRes {
        let api = client.runtime_api();
        let block_header = client
            .header(substrate_hash)
            .map_err(|e| {
                format!(
                    "Error when fetching substrate block {} header : {:?}",
                    substrate_hash, e
                )
            })?
            .ok_or_else(|| format!("Substrate block {} doesn't exist", substrate_hash))?;

        let height = *block_header.number();
        let substrate_parent_hash = *block_header.parent_hash();

        let (eth_block, eth_transactions) = match (
            overrides.current_block(substrate_hash),
            overrides.current_transaction_statuses(substrate_hash),
        ) {
            (Some(a), Some(b)) => (a, b),
            _ => {
                return Err(format!(
                    "Failed to get Ethereum block data for Substrate block {}",
                    substrate_hash
                ))
            }
        };

        let eth_block_hash = eth_block.header.hash();
        let eth_tx_hashes = eth_transactions
            .iter()
            .map(|t| t.transaction_hash)
            .collect();

        let extrinsics = backend
            .blockchain()
            .body(substrate_hash)
            .map_err(|e| {
                format!(
                    "Blockchain error when fetching extrinsics of block {} : {:?}",
                    height, e
                )
            })?
            .ok_or_else(|| format!("Could not find block {} when fetching extrinsics.", height))?;

        let trace_api_version = if let Ok(Some(api_version)) =
            api.api_version::<dyn DebugRuntimeApi<B>>(substrate_parent_hash)
        {
            api_version
        } else {
            return Err("Runtime api version call failed (trace)".to_string());
        };

        let f = || -> Result<_, String> {
            let result = if trace_api_version >= 5 {
                api.trace_block(
                    substrate_parent_hash,
                    extrinsics,
                    eth_tx_hashes,
                    &block_header,
                )
            } else {
                let core_api_version = if let Ok(Some(api_version)) =
                    api.api_version::<dyn Core<B>>(substrate_parent_hash)
                {
                    api_version
                } else {
                    return Err("Runtime api version call failed (core)".to_string());
                };

                if core_api_version >= 5 {
                    api.initialize_block(substrate_parent_hash, &block_header)
                        .map_err(|e| format!("Runtime api access error: {:?}", e))?;
                } else {
                    #[allow(deprecated)]
                    api.initialize_block_before_version_5(substrate_parent_hash, &block_header)
                        .map_err(|e| format!("Runtime api access error: {:?}", e))?;
                }

                #[allow(deprecated)]
                api.trace_block_before_version_5(substrate_parent_hash, extrinsics, eth_tx_hashes)
            };

            result
                .map_err(|e| format!("Blockchain error when replaying block {} : {:?}", height, e))?
                .map_err(|e| {
                    tracing::warn!(
                        target: "tracing",
                        "Internal runtime error when replaying block {} : {:?}",
                        height,
                        e
                    );
                    format!(
                        "Internal runtime error when replaying block {} : {:?}",
                        height, e
                    )
                })?;

            Ok(moonbeam_rpc_primitives_debug::Response::Block)
        };

        let eth_transactions_by_index: BTreeMap<u32, H256> = eth_transactions
            .iter()
            .map(|t| (t.transaction_index, t.transaction_hash))
            .collect();

        let mut proxy = moonbeam_client_evm_tracing::listeners::CallList::default();
        proxy.using(f)?;

        let traces: Vec<TransactionTrace> =
            moonbeam_client_evm_tracing::formatters::TraceFilter::format(proxy)
                .ok_or("Failed to format proxy")?
                .into_iter()
                .filter_map(|mut trace| {
                    match eth_transactions_by_index.get(&trace.transaction_position) {
                        Some(transaction_hash) => {
                            trace.block_hash = eth_block_hash;
                            trace.block_number = height;
                            trace.transaction_hash = *transaction_hash;

                            if let block::TransactionTraceOutput::Error(ref mut error) =
                                trace.output
                            {
                                if error.as_slice() == b"execution reverted" {
                                    *error = b"Reverted".to_vec();
                                }
                            }

                            Some(trace)
                        }
                        None => {
                            log::warn!(
                                target: "tracing",
                                "A trace in block {} does not map to any known ethereum transaction. Trace: {:?}",
                                height,
                                trace,
                            );
                            None
                        }
                    }
                })
                .collect();

        Ok(traces)
    }
}

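/// Prometheus metrics for the tracing cache.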
#[derive(Clone)]
pub(crate) struct Metrics {
    tracing_cache_hits: Counter<U64>,
    tracing_cache_misses: Counter<U64>,
}

impl Metrics {
    pub(crate) fn register(registry: &PrometheusRegistry) -> Result<Self, PrometheusError> {
        Ok(Self {
            tracing_cache_hits: register(
                Counter::new("tracing_cache_hits", "Number of tracing cache hits.")?,
                registry,
            )?,
            tracing_cache_misses: register(
                Counter::new("tracing_cache_misses", "Number of tracing cache misses.")?,
                registry,
            )?,
        })
    }
}