zebra_network/constants.rs
//! Definitions of Zebra network constants, including:
//! - network protocol versions,
//! - network protocol user agents,
//! - peer address limits,
//! - peer connection limits, and
//! - peer connection timeouts.

use std::{collections::HashMap, time::Duration};

use lazy_static::lazy_static;
use regex::Regex;

// TODO: should these constants be split into protocol also?
use crate::protocol::external::types::*;

use zebra_chain::{
    parameters::{
        Network::{self, *},
        NetworkKind,
        NetworkUpgrade::*,
    },
    serialization::Duration32,
};

/// A multiplier used to calculate the inbound connection limit for the peer set.
///
/// When it starts up, Zebra opens [`Config.peerset_initial_target_size`]
/// outbound connections.
///
/// Then it opens additional outbound connections as needed for network requests,
/// and accepts inbound connections initiated by other peers.
///
/// The inbound and outbound connection limits are calculated as follows:
///
/// The inbound limit is:
/// `Config.peerset_initial_target_size * INBOUND_PEER_LIMIT_MULTIPLIER`.
/// (This is similar to `zcashd`'s default inbound limit.)
///
/// The outbound limit is:
/// `Config.peerset_initial_target_size * OUTBOUND_PEER_LIMIT_MULTIPLIER`.
/// (This is a bit larger than `zcashd`'s default outbound limit.)
///
/// # Security
///
/// Each connection requires one inbound slot and one outbound slot, on two different peers.
/// But some peers only make outbound connections, because they are behind a firewall,
/// or their listener port address is misconfigured.
///
/// Zebra allows extra inbound connection slots,
/// to prevent accidental connection slot exhaustion.
/// (`zcashd` also allows a large number of extra inbound slots.)
///
/// ## Security Tradeoff
///
/// Since the inbound peer limit is higher than the outbound peer limit,
/// Zebra can be connected to a majority of peers
/// that it has *not* chosen from its [`crate::AddressBook`].
///
/// Inbound peer connections are initiated by the remote peer,
/// so inbound peer selection is not controlled by the local node.
/// This means that an attacker can easily become a majority of a node's peers.
///
/// However, connection exhaustion is a higher priority.
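///
/// A minimal sketch of the resulting limits, assuming the default target size of 25
/// ([`DEFAULT_PEERSET_INITIAL_TARGET_SIZE`]):
///
/// ```rust,ignore
/// // inbound limit:  25 * 5 = 125 connection slots
/// assert_eq!(DEFAULT_PEERSET_INITIAL_TARGET_SIZE * INBOUND_PEER_LIMIT_MULTIPLIER, 125);
/// // outbound limit: 25 * 3 = 75 connection slots
/// assert_eq!(DEFAULT_PEERSET_INITIAL_TARGET_SIZE * OUTBOUND_PEER_LIMIT_MULTIPLIER, 75);
/// ```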
pub const INBOUND_PEER_LIMIT_MULTIPLIER: usize = 5;

/// A multiplier used to calculate the outbound connection limit for the peer set.
///
/// See [`INBOUND_PEER_LIMIT_MULTIPLIER`] for details.
pub const OUTBOUND_PEER_LIMIT_MULTIPLIER: usize = 3;

/// The default maximum number of peer connections Zebra will keep for a given IP address
/// before it drops any additional peer connections with that IP.
///
/// This will be used as `Config.max_connections_per_ip` if no valid value is provided.
///
/// Note: Zebra currently avoids initiating outbound connections where it
/// has recently had a successful handshake with any address
/// on that IP. With the default configuration, Zebra will not initiate more than
/// one outbound connection to an IP, but it will accept more inbound connections
/// from that IP.
pub const DEFAULT_MAX_CONNS_PER_IP: usize = 1;

/// The default peerset target size.
///
/// This will be used as `Config.peerset_initial_target_size` if no valid value is provided.
pub const DEFAULT_PEERSET_INITIAL_TARGET_SIZE: usize = 25;

/// The maximum number of peers we will add to the address book after each `getaddr` request.
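///
/// With the default constants in this file, this works out to:
///
/// ```rust,ignore
/// // 25 * 3 / 2 = 37 addresses per response
/// assert_eq!(PEER_ADDR_RESPONSE_LIMIT, 37);
/// ```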
pub const PEER_ADDR_RESPONSE_LIMIT: usize =
    DEFAULT_PEERSET_INITIAL_TARGET_SIZE * OUTBOUND_PEER_LIMIT_MULTIPLIER / 2;

/// The buffer size for the peer set.
///
/// This should be greater than 1 to avoid sender contention, but also reasonably
/// small, to avoid queueing too many in-flight block downloads. (A large queue
/// of in-flight block downloads can choke a constrained local network
/// connection, or a small peer set on testnet.)
///
/// We assume that Zebra nodes have at least 10 Mbps bandwidth. Therefore, a
/// maximum-sized block can take up to 2 seconds to download. So the peer set
/// buffer adds up to 6 seconds worth of blocks to the queue.
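///
/// A rough sketch of that arithmetic (the 2 MB maximum block size and 10 Mbps
/// bandwidth are the assumptions from the paragraph above):
///
/// ```rust,ignore
/// let download_secs_per_block = (2.0 * 8.0) / 10.0; // 16 Mbit / 10 Mbps ≈ 1.6 s, rounded up to 2 s
/// let queued_secs = PEERSET_BUFFER_SIZE as f64 * 2.0; // 3 blocks * 2 s = 6 s of blocks
/// ```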
pub const PEERSET_BUFFER_SIZE: usize = 3;

/// The timeout for sending a message to a remote peer,
/// and receiving a response from a remote peer.
pub const REQUEST_TIMEOUT: Duration = Duration::from_secs(20);

/// The timeout for connections and handshakes when connecting to new peers.
///
/// Outbound TCP connections must complete within this timeout,
/// then the handshake messages get an additional `HANDSHAKE_TIMEOUT` to complete.
/// (Inbound TCP accepts can't have a timeout, because they are handled by the OS.)
///
/// This timeout should remain small, because it helps stop slow peers from getting
/// into the peer set. This is particularly important for network-constrained
/// nodes, and on testnet.
pub const HANDSHAKE_TIMEOUT: Duration = Duration::from_secs(3);

/// The maximum time difference for two address book changes to be considered concurrent.
///
/// This prevents important changes or connection progress made at nearly the same time
/// from being overridden by less important changes.
///
/// This timeout should be less than:
/// - the [peer reconnection delay](MIN_PEER_RECONNECTION_DELAY), and
/// - the [peer keepalive/heartbeat interval](HEARTBEAT_INTERVAL).
///
/// But more than:
/// - the amount of time between connection events and address book updates,
///   even under heavy load (in tests, we have observed delays up to 500ms),
/// - the delay between an outbound connection failing,
///   and the [CandidateSet](crate::peer_set::CandidateSet) registering the failure, and
/// - the delay between the application closing a connection,
///   and any remaining positive changes from the peer.
pub const CONCURRENT_ADDRESS_CHANGE_PERIOD: Duration = Duration::from_secs(5);

/// We expect to receive a message from a live peer at least once in this time duration.
///
/// This is the sum of:
/// - the interval between connection heartbeats
/// - the timeout of a possible pending (already-sent) request
/// - the timeout for a possible queued request
/// - the timeout for the heartbeat request itself
///
/// This avoids explicit synchronization, but relies on the peer
/// connector actually setting up channels and these heartbeats in a
/// specific manner that matches up with this math.
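///
/// A sketch of that sum, using the other constants in this file:
///
/// ```rust,ignore
/// // 59 s heartbeat interval + 20 s pending + 20 s queued + 20 s heartbeat request = 119 s
/// assert_eq!(
///     MIN_PEER_RECONNECTION_DELAY,
///     HEARTBEAT_INTERVAL + REQUEST_TIMEOUT + REQUEST_TIMEOUT + REQUEST_TIMEOUT
/// );
/// ```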
pub const MIN_PEER_RECONNECTION_DELAY: Duration = Duration::from_secs(59 + 20 + 20 + 20);

/// Zebra rotates its peer inventory registry every time this interval elapses.
///
/// After 2 of these intervals, Zebra's local available and missing inventory entries expire.
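///
/// A worked sketch of the expiry window, assuming the 75-second post-Blossom
/// block target spacing:
///
/// ```rust,ignore
/// // entries expire after 2 * 53 s = 106 s, roughly 1-2 block intervals of 75 s
/// assert_eq!(2 * INVENTORY_ROTATION_INTERVAL.as_secs(), 106);
/// ```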
pub const INVENTORY_ROTATION_INTERVAL: Duration = Duration::from_secs(53);

/// The default peer address crawler interval.
///
/// This should be at least [`HANDSHAKE_TIMEOUT`] lower than all other crawler
/// intervals.
///
/// This makes the following sequence of events more likely:
/// 1. a peer address crawl,
/// 2. new peer connections,
/// 3. peer requests from other crawlers.
///
/// Using a prime number makes sure that peer address crawls
/// don't synchronise with other crawls.
pub const DEFAULT_CRAWL_NEW_PEER_INTERVAL: Duration = Duration::from_secs(61);

/// The peer address disk cache update interval.
///
/// This should be longer than [`DEFAULT_CRAWL_NEW_PEER_INTERVAL`],
/// but shorter than [`MAX_PEER_ACTIVE_FOR_GOSSIP`].
///
/// We use a short interval so Zebra instances which are restarted frequently
/// still have useful caches.
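///
/// A sketch of the intended ordering, using the other constants in this file
/// (the 3 hour upper bound is [`MAX_PEER_ACTIVE_FOR_GOSSIP`]):
///
/// ```rust,ignore
/// // 61 s crawl interval < 5 min cache update interval < 3 h gossip freshness limit
/// assert!(DEFAULT_CRAWL_NEW_PEER_INTERVAL < PEER_DISK_CACHE_UPDATE_INTERVAL);
/// ```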
pub const PEER_DISK_CACHE_UPDATE_INTERVAL: Duration = Duration::from_secs(5 * 60);

/// The maximum number of addresses in the peer disk cache.
///
/// This is chosen to be less than the number of active peers,
/// and approximately the same as the number of seed peers returned by DNS.
/// It is a tradeoff between fingerprinting attacks, DNS pollution risk, and cache pollution risk.
pub const MAX_PEER_DISK_CACHE_SIZE: usize = 75;

/// The maximum duration since a peer was last seen to consider it reachable.
///
/// This is used to prevent Zebra from gossiping addresses that are likely unreachable. Peers that
/// have last been seen more than this duration ago will not be gossiped.
///
/// This is determined as a tradeoff between network health and network view leakage. From the
/// [Bitcoin protocol documentation](https://en.bitcoin.it/wiki/Protocol_documentation#getaddr):
///
/// "The typical presumption is that a node is likely to be active if it has been sending a message
/// within the last three hours."
pub const MAX_PEER_ACTIVE_FOR_GOSSIP: Duration32 = Duration32::from_hours(3);

/// The maximum duration since a peer was last seen to consider reconnecting to it.
///
/// Peers that haven't been seen for more than three days, and whose last connection attempt
/// failed, are considered to be offline, and Zebra will stop trying to connect to them.
///
/// This ensures that Zebra doesn't cause itself a denial of service by constantly and
/// uselessly retrying connections to too many offline peers.
pub const MAX_RECENT_PEER_AGE: Duration32 = Duration32::from_days(3);

/// Regular interval for sending keepalive `Ping` messages to each
/// connected peer.
///
/// Using a prime number makes sure that heartbeats don't synchronise with crawls.
pub const HEARTBEAT_INTERVAL: Duration = Duration::from_secs(59);

/// The minimum time between outbound peer connections, implemented by
/// [`CandidateSet::next`][crate::peer_set::CandidateSet::next].
///
/// ## Security
///
/// Zebra resists distributed denial of service attacks by making sure that new outbound peer
/// connections are only initiated after this minimum time has elapsed.
///
/// It also enforces a minimum per-peer reconnection interval, and filters failed outbound peers.
pub const MIN_OUTBOUND_PEER_CONNECTION_INTERVAL: Duration = Duration::from_millis(100);

/// The minimum time between _successful_ inbound peer connections, implemented by
/// `peer_set::initialize::accept_inbound_connections`.
///
/// To support multiple peers connecting simultaneously, this is less than the
/// [`HANDSHAKE_TIMEOUT`].
///
/// ## Security
///
/// Zebra resists distributed denial of service attacks by limiting the inbound connection rate.
/// After a _successful_ inbound connection, new inbound peer connections are only accepted,
/// and our side of the handshake initiated, after this minimum time has elapsed.
///
/// The inbound interval is much longer than the outbound interval, because Zebra does not
/// control the selection or reconnections of inbound peers.
pub const MIN_INBOUND_PEER_CONNECTION_INTERVAL: Duration = Duration::from_secs(1);

/// The minimum time between _failed_ inbound peer connections, implemented by
/// `peer_set::initialize::accept_inbound_connections`.
///
/// This is a tradeoff between:
/// - the memory, CPU, and network usage of each new connection attempt, and
/// - denying service to honest peers due to an attack which makes many inbound connections.
///
/// Attacks that reach this limit should be managed using a firewall or intrusion prevention system.
///
/// ## Security
///
/// Zebra resists distributed denial of service attacks by limiting the inbound connection rate.
/// After a _failed_ inbound connection, new inbound peer connections are only accepted,
/// and our side of the handshake initiated, after this minimum time has elapsed.
pub const MIN_INBOUND_PEER_FAILED_CONNECTION_INTERVAL: Duration = Duration::from_millis(10);

/// The minimum time between successive calls to
/// [`CandidateSet::update`][crate::peer_set::CandidateSet::update].
///
/// Using a prime number makes sure that peer address crawls don't synchronise with other crawls.
///
/// ## Security
///
/// Zebra resists distributed denial of service attacks by making sure that requests for more
/// peer addresses are sent at least [`MIN_PEER_GET_ADDR_INTERVAL`] apart.
pub const MIN_PEER_GET_ADDR_INTERVAL: Duration = Duration::from_secs(31);

/// The combined timeout for all the requests in
/// [`CandidateSet::update`][crate::peer_set::CandidateSet::update].
///
/// `zcashd` doesn't respond to most `getaddr` requests,
/// so this timeout needs to be short.
pub const PEER_GET_ADDR_TIMEOUT: Duration = Duration::from_secs(8);

/// The number of GetAddr requests sent when crawling for new peers.
///
/// # Security
///
/// The fanout should be greater than 2, so that Zebra avoids getting a majority
/// of its initial address book entries from a single peer.
///
/// Zebra regularly crawls for new peers, initiating a new crawl every
/// [`crawl_new_peer_interval`](crate::config::Config.crawl_new_peer_interval).
///
/// TODO: Restore the fanout to 3, once fanouts are limited to the number of ready peers (#2214)
///
/// In #3110, we changed the fanout to 1, to make sure we actually use cached address responses.
/// With a fanout of 3, we were dropping a lot of responses, because the overall crawl timed out.
pub const GET_ADDR_FANOUT: usize = 1;

/// The maximum number of addresses allowed in an `addr` or `addrv2` message.
///
/// `addr`:
/// > The number of IP address entries up to a maximum of 1,000.
///
/// <https://developer.bitcoin.org/reference/p2p_networking.html#addr>
///
/// `addrv2`:
/// > One message can contain up to 1,000 addresses.
/// > Clients MUST reject messages with more addresses.
///
/// <https://zips.z.cash/zip-0155#specification>
pub const MAX_ADDRS_IN_MESSAGE: usize = 1000;

/// The fraction of addresses Zebra sends in response to a `Peers` request.
///
/// Each response contains approximately:
/// `address_book.len() / ADDR_RESPONSE_LIMIT_DENOMINATOR`
/// addresses, selected at random from the address book.
///
/// # Security
///
/// This limit makes sure that Zebra does not reveal its entire address book
/// in a single `Peers` response.
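///
/// A worked sketch, assuming a full address book at [`MAX_ADDRS_IN_ADDRESS_BOOK`]:
///
/// ```rust,ignore
/// // 5000 / 4 = 1250 candidate addresses per response, which the
/// // `addr`/`addrv2` message limit (`MAX_ADDRS_IN_MESSAGE`) then caps at 1000.
/// assert_eq!(MAX_ADDRS_IN_ADDRESS_BOOK / ADDR_RESPONSE_LIMIT_DENOMINATOR, 1250);
/// ```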
pub const ADDR_RESPONSE_LIMIT_DENOMINATOR: usize = 4;

/// The maximum number of addresses Zebra will keep in its address book.
///
/// This is a tradeoff between:
/// - revealing the whole address book in a few requests,
/// - sending the maximum number of peer addresses, and
/// - making sure the limit code actually gets run.
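///
/// With the constants above, this works out to:
///
/// ```rust,ignore
/// // 1000 * (4 + 1) = 5000 addresses
/// assert_eq!(MAX_ADDRS_IN_ADDRESS_BOOK, 5_000);
/// ```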
pub const MAX_ADDRS_IN_ADDRESS_BOOK: usize =
    MAX_ADDRS_IN_MESSAGE * (ADDR_RESPONSE_LIMIT_DENOMINATOR + 1);

/// Truncate timestamps in outbound address messages to this time interval.
///
/// ## SECURITY
///
/// Timestamp truncation prevents a peer from learning exactly when we received
/// messages from each of our peers.
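///
/// A hypothetical sketch of the truncation; the exact rounding used by the address
/// book is not shown here, this just illustrates the 30-minute granularity:
///
/// ```rust,ignore
/// let granularity = u64::from(TIMESTAMP_TRUNCATION_SECONDS); // 30 * 60 = 1800 s
/// let unix_timestamp: u64 = 1_700_000_123;
/// let truncated = unix_timestamp - (unix_timestamp % granularity);
/// assert_eq!(truncated % granularity, 0);
/// ```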
pub const TIMESTAMP_TRUNCATION_SECONDS: u32 = 30 * 60;

/// The Zcash network protocol version implemented by this crate, and advertised
/// during connection setup.
///
/// The current protocol version is checked by our peers. If it is too old,
/// newer peers will disconnect from us.
///
/// The current protocol version typically changes before Mainnet and Testnet
/// network upgrades.
///
/// This version of Zebra draws the current network protocol version from
/// [ZIP-253](https://zips.z.cash/zip-0253).
// TODO: Update this constant to the correct value after NU6.1 & NU7 activation,
// pub const CURRENT_NETWORK_PROTOCOL_VERSION: Version = Version(170_140); // NU6.1
// pub const CURRENT_NETWORK_PROTOCOL_VERSION: Version = Version(170_160); // NU7
pub const CURRENT_NETWORK_PROTOCOL_VERSION: Version = Version(170_120);

/// The default RTT estimate for peer responses.
///
/// We choose a high value for the default RTT, so that new peers must prove they
/// are fast, before we prefer them to other peers. This is particularly
/// important on testnet, which has a small number of peers, which are often
/// slow.
///
/// Make the default RTT slightly higher than the request timeout.
pub const EWMA_DEFAULT_RTT: Duration = Duration::from_secs(REQUEST_TIMEOUT.as_secs() + 1);

/// The decay time for the EWMA response time metric used for load balancing.
///
/// This should be much larger than the `SYNC_RESTART_TIMEOUT`, so we choose
/// better peers when we restart the sync.
pub const EWMA_DECAY_TIME_NANOS: f64 = 200.0 * NANOS_PER_SECOND;

/// The number of nanoseconds in one second.
const NANOS_PER_SECOND: f64 = 1_000_000_000.0;

/// The duration it takes for the drop probability of an overloaded connection to
/// reach [`MIN_OVERLOAD_DROP_PROBABILITY`].
///
/// Peer connections that receive multiple overloads have a higher probability of being dropped.
///
/// The probability of a connection being dropped gradually decreases during this interval
/// until it reaches the default drop probability ([`MIN_OVERLOAD_DROP_PROBABILITY`]).
///
/// Increasing this number increases the rate at which connections are dropped.
pub const OVERLOAD_PROTECTION_INTERVAL: Duration = MIN_INBOUND_PEER_CONNECTION_INTERVAL;

/// The minimum probability of dropping a peer connection when it receives an
/// [`Overloaded`](crate::PeerError::Overloaded) error.
pub const MIN_OVERLOAD_DROP_PROBABILITY: f32 = 0.05;

/// The maximum probability of dropping a peer connection when it receives an
/// [`Overloaded`](crate::PeerError::Overloaded) error.
pub const MAX_OVERLOAD_DROP_PROBABILITY: f32 = 0.5;

/// The minimum interval between logging peer set status updates.
pub const MIN_PEER_SET_LOG_INTERVAL: Duration = Duration::from_secs(60);

/// The maximum number of peer misbehavior incidents before a peer is
/// disconnected and banned.
pub const MAX_PEER_MISBEHAVIOR_SCORE: u32 = 100;

/// The maximum number of banned IP addresses to be stored in-memory at any time.
pub const MAX_BANNED_IPS: usize = 20_000;

lazy_static! {
    /// The minimum network protocol version accepted by this crate for each network,
    /// represented as a network upgrade.
    ///
    /// The minimum protocol version is used to check the protocol versions of our
    /// peers during the initial block download. After the initial block download,
    /// we use the current block height to select the minimum network protocol
    /// version.
    ///
    /// If peer versions are too old, we will disconnect from them.
    ///
    /// The minimum network protocol version typically changes after Mainnet and
    /// Testnet network upgrades.
    // TODO: Change `Nu6` to `Nu7` after NU7 activation.
    // TODO: Move the value here to a field on `testnet::Parameters` (#8367)
    pub static ref INITIAL_MIN_NETWORK_PROTOCOL_VERSION: HashMap<NetworkKind, Version> = {
        let mut hash_map = HashMap::new();

        hash_map.insert(NetworkKind::Mainnet, Version::min_specified_for_upgrade(&Mainnet, Nu6));
        hash_map.insert(NetworkKind::Testnet, Version::min_specified_for_upgrade(&Network::new_default_testnet(), Nu6));
        hash_map.insert(NetworkKind::Regtest, Version::min_specified_for_upgrade(&Network::new_regtest(Default::default()), Nu6));

        hash_map
    };

    /// OS-specific error when the port being opened is already in use.
    pub static ref PORT_IN_USE_ERROR: Regex = if cfg!(unix) {
        #[allow(clippy::trivial_regex)]
        Regex::new(&regex::escape("already in use"))
    } else {
        Regex::new("(access a socket in a way forbidden by its access permissions)|(Only one usage of each socket address)")
    }.expect("regex is valid");
}
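
// A minimal usage sketch for the statics above (hypothetical caller code, assuming
// `Version` values can be compared directly):
//
//     let min_version = INITIAL_MIN_NETWORK_PROTOCOL_VERSION[&NetworkKind::Mainnet];
//     assert!(CURRENT_NETWORK_PROTOCOL_VERSION >= min_version);
//     assert!(PORT_IN_USE_ERROR.is_match("Address already in use (os error 98)"));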

/// The timeout for DNS lookups.
///
/// [6.1.3.3 Efficient Resource Usage] from [RFC 1123: Requirements for Internet Hosts]
/// suggests a resolver timeout of no less than 5 seconds.
///
/// [RFC 1123: Requirements for Internet Hosts]: <https://tools.ietf.org/rfcmarkup?doc=1123>
/// [6.1.3.3 Efficient Resource Usage]: <https://tools.ietf.org/rfcmarkup?doc=1123#page-77>
pub const DNS_LOOKUP_TIMEOUT: Duration = Duration::from_secs(5);

#[cfg(test)]
mod tests {
    use zebra_chain::parameters::POST_BLOSSOM_POW_TARGET_SPACING;

    use super::*;

    /// This ensures that the `Duration` value we are computing for
    /// [`MIN_PEER_RECONNECTION_DELAY`] actually matches the other const values
    /// it relies on.
    #[test]
    fn ensure_live_peer_duration_value_matches_others() {
        let _init_guard = zebra_test::init();

        let constructed_live_peer_duration =
            HEARTBEAT_INTERVAL + REQUEST_TIMEOUT + REQUEST_TIMEOUT + REQUEST_TIMEOUT;

        assert_eq!(MIN_PEER_RECONNECTION_DELAY, constructed_live_peer_duration);
    }

    /// Make sure that the timeout values are consistent with each other.
    #[test]
    fn ensure_timeouts_consistent() {
        let _init_guard = zebra_test::init();

        assert!(HANDSHAKE_TIMEOUT <= REQUEST_TIMEOUT,
                "Handshakes are requests, so the handshake timeout can't be longer than the timeout for all requests.");
        // This check is particularly important on testnet, which has a small
        // number of peers, which are often slow.
        assert!(EWMA_DEFAULT_RTT > REQUEST_TIMEOUT,
                "The default EWMA RTT should be higher than the request timeout, so new peers are required to prove they are fast, before we prefer them to other peers.");

        let request_timeout_nanos = REQUEST_TIMEOUT.as_secs_f64() * NANOS_PER_SECOND
            + f64::from(REQUEST_TIMEOUT.subsec_nanos());

        assert!(EWMA_DECAY_TIME_NANOS > request_timeout_nanos,
                "The EWMA decay time should be higher than the request timeout, so timed out peers are penalised by the EWMA.");

        assert!(
            MIN_PEER_RECONNECTION_DELAY.as_secs() as f32
                / (u32::try_from(MAX_ADDRS_IN_ADDRESS_BOOK).expect("fits in u32")
                    * MIN_OUTBOUND_PEER_CONNECTION_INTERVAL)
                .as_secs() as f32
                >= 0.2,
            "some peers should get a connection attempt in each connection interval",
        );

        assert!(
            MIN_PEER_RECONNECTION_DELAY.as_secs() as f32
                / (u32::try_from(MAX_ADDRS_IN_ADDRESS_BOOK).expect("fits in u32")
                    * MIN_OUTBOUND_PEER_CONNECTION_INTERVAL)
                .as_secs() as f32
                <= 2.0,
            "each peer should only have a few connection attempts in each connection interval",
        );
    }

    /// Make sure that peer age limits are consistent with each other.
    #[test]
    fn ensure_peer_age_limits_consistent() {
        let _init_guard = zebra_test::init();

        assert!(
            MAX_PEER_ACTIVE_FOR_GOSSIP <= MAX_RECENT_PEER_AGE,
            "we should only gossip peers we are actually willing to try ourselves"
        );
    }

    /// Make sure the address limits are consistent with each other.
    #[test]
    #[allow(clippy::assertions_on_constants)]
    fn ensure_address_limits_consistent() {
        // Estimated network address book size in November 2023, after the address book limit was increased.
        // Zebra 1.0.0-beta.2 address book metrics in December 2021 showed 4500 peers.
        const TYPICAL_MAINNET_ADDRESS_BOOK_SIZE: usize = 5_500;

        let _init_guard = zebra_test::init();

        assert!(
            MAX_ADDRS_IN_ADDRESS_BOOK >= GET_ADDR_FANOUT * MAX_ADDRS_IN_MESSAGE,
            "the address book should hold at least a fanout's worth of addresses"
        );

        assert!(
            MAX_ADDRS_IN_ADDRESS_BOOK / ADDR_RESPONSE_LIMIT_DENOMINATOR > MAX_ADDRS_IN_MESSAGE,
            "the address book should hold enough addresses for a full response"
        );

        assert!(
            MAX_ADDRS_IN_ADDRESS_BOOK <= TYPICAL_MAINNET_ADDRESS_BOOK_SIZE,
            "the address book limit should actually be used"
        );
    }

    /// Make sure inventory registry rotation is consistent with the target block interval.
    #[test]
    fn ensure_inventory_rotation_consistent() {
        let _init_guard = zebra_test::init();

        assert!(
            INVENTORY_ROTATION_INTERVAL
                < Duration::from_secs(POST_BLOSSOM_POW_TARGET_SPACING.into()),
            "we should expire inventory every time 1-2 new blocks get generated"
        );
    }
}