zebra_chain/work/difficulty.rs
//! Block difficulty data structures and calculations
//!
//! The block difficulty "target threshold" is stored in the block header as a
//! 32-bit `CompactDifficulty`. The `block::Hash` must be less than or equal
//! to the `ExpandedDifficulty` threshold, when represented as a 256-bit integer
//! in little-endian order.
//!
//! The target threshold is also used to calculate the `Work` for each block.
//! The block work is used to find the chain with the greatest total work. Each
//! block's work value depends on the fixed threshold in the block header, not
//! the actual work represented by the block header hash.
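//!
//! # Example
//!
//! A minimal sketch of how the three types relate, using this crate's public
//! `zebra_chain::work::difficulty` API (the compact value shown is illustrative):
//!
//! ```
//! use hex::FromHex;
//! use zebra_chain::work::difficulty::CompactDifficulty;
//!
//! // Parse a "compact bits" value from its big-endian display order.
//! let compact = CompactDifficulty::from_hex("1f07ffff").expect("valid difficulty");
//!
//! // Expand it into the 256-bit threshold that block hashes are compared against.
//! let _threshold = compact.to_expanded().expect("non-zero, non-negative, in range");
//!
//! // The same threshold also determines the block's work.
//! let work = compact.to_work().expect("valid work");
//! assert!(work.as_u128() > 0);
//! ```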

use std::{
    cmp::{Ordering, PartialEq, PartialOrd},
    fmt,
    iter::Sum,
    ops::{Add, Div, Mul},
};

use hex::{FromHex, ToHex};

use crate::{block, parameters::Network, BoxError};

pub use crate::work::u256::U256;

#[cfg(any(test, feature = "proptest-impl"))]
mod arbitrary;
#[cfg(test)]
mod tests;

/// A 32-bit "compact bits" value, which represents the difficulty threshold for
/// a block header.
///
/// Used for:
/// - checking the `difficulty_threshold` value in the block header,
/// - calculating the 256-bit `ExpandedDifficulty` threshold, for comparison
///   with the block header hash, and
/// - calculating the block work.
///
/// # Consensus
///
/// This is a floating-point encoding, with a 24-bit signed mantissa,
/// an 8-bit exponent, an offset of 3, and a radix of 256.
/// (IEEE 754 32-bit floating-point values use a separate sign bit, an implicit
/// leading mantissa bit, an offset of 127, and a radix of 2.)
///
/// The precise bit pattern of a `CompactDifficulty` value is
/// consensus-critical, because it is used for the `difficulty_threshold` field,
/// which is:
/// - part of the `BlockHeader`, which is used to create the
///   `block::Hash`, and
/// - bitwise equal to the median `ExpandedDifficulty` value of recent blocks,
///   when encoded to `CompactDifficulty` using the specified conversion
///   function.
///
/// Without these consensus rules, some `ExpandedDifficulty` values would have
/// multiple equivalent `CompactDifficulty` values, due to redundancy in the
/// floating-point format.
///
/// > Deterministic conversions between a target threshold and a “compact” nBits value
/// > are not fully defined in the Bitcoin documentation, and so we define them here:
/// > (see equations in the Zcash Specification [section 7.7.4])
///
/// [section 7.7.4]: https://zips.z.cash/protocol/protocol.pdf#nbits
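///
/// For example, as an illustration of the encoding (not a quote from the
/// spec): the compact value `0x1d00ffff` has mantissa `0x00ffff` and exponent
/// byte `0x1d`, so it expands to `0x00ffff * 256^(0x1d - 3)`, which is the
/// 256-bit value `0x00000000ffff` followed by 26 zero bytes.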
#[derive(Clone, Copy, Eq, PartialEq, Serialize, Deserialize)]
#[cfg_attr(any(test, feature = "proptest-impl"), derive(Default))]
pub struct CompactDifficulty(pub(crate) u32);

/// An invalid CompactDifficulty value, for testing.
pub const INVALID_COMPACT_DIFFICULTY: CompactDifficulty = CompactDifficulty(u32::MAX);

/// A 256-bit unsigned "expanded difficulty" value.
///
/// Used as a target threshold for the difficulty of a `block::Hash`.
///
/// # Consensus
///
/// The precise bit pattern of an `ExpandedDifficulty` value is
/// consensus-critical, because it is compared with the `block::Hash`.
///
/// Note that each `CompactDifficulty` value can be converted from a
/// range of `ExpandedDifficulty` values, because the precision of
/// the floating-point format requires rounding on conversion.
///
/// Therefore, consensus-critical code must perform the specified
/// conversions to `CompactDifficulty`, even if the original
/// `ExpandedDifficulty` values are known.
///
/// Callers should avoid constructing `ExpandedDifficulty` zero
/// values, because they are rejected by the consensus rules,
/// and cause some conversion functions to panic.
///
/// > The difficulty filter is unchanged from Bitcoin, and is calculated using SHA-256d on the
/// > whole block header (including solutionSize and solution). The result is interpreted as a
/// > 256-bit integer represented in little-endian byte order, which MUST be less than or equal
/// > to the target threshold given by ToTarget(nBits).
///
/// Zcash Specification [section 7.7.2].
///
/// [section 7.7.2]: https://zips.z.cash/protocol/protocol.pdf#difficulty
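///
/// # Example
///
/// A sketch of the rounding described above, using the mainnet `PoWLimit`
/// value `2^243 - 1` purely as an illustration:
///
/// ```
/// use zebra_chain::work::difficulty::ExpandedDifficulty;
///
/// // 2^243 - 1 in big-endian display order.
/// let mut bytes = [0xff; 32];
/// bytes[0] = 0x00;
/// bytes[1] = 0x07;
/// let pow_limit = ExpandedDifficulty::from_bytes_in_display_order(&bytes);
///
/// // The compact encoding only keeps the top bytes of the value, so
/// // expanding it again yields a slightly smaller threshold.
/// let rounded = pow_limit.to_compact().to_expanded().expect("valid threshold");
/// assert!(rounded < pow_limit);
/// ```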
//
// TODO: Use NonZeroU256, when available
#[derive(Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct ExpandedDifficulty(U256);

/// A 128-bit unsigned "Work" value.
///
/// Used to calculate the total work for each chain of blocks.
///
/// # Consensus
///
/// The relative value of `Work` is consensus-critical, because it is used to
/// choose the best chain. But its precise value and bit pattern are not
/// consensus-critical.
///
/// We calculate work values according to the Zcash specification, but store
/// them as u128, rather than the implied u256. We don't expect the total chain
/// work to ever exceed 2^128. The current total chain work for Zcash is 2^58,
/// and Bitcoin adds around 2^91 work per year. (Each extra bit represents twice
/// as much work.)
///
/// > a node chooses the “best” block chain visible to it by finding the chain of valid blocks
/// > with the greatest total work. The work of a block with value nBits for the nBits field in
/// > its block header is defined as `floor(2^256 / (ToTarget(nBits) + 1))`.
///
/// Zcash Specification [section 7.7.5].
///
/// [section 7.7.5]: https://zips.z.cash/protocol/protocol.pdf#workdef
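///
/// # Example
///
/// A sketch of the work formula quoted above, for an illustrative compact
/// value (the mainnet `PoWLimit` encoding): its target is `2^243 - 2^224`,
/// so its work is `floor(2^256 / (2^243 - 2^224 + 1)) = 2^13`.
///
/// ```
/// use hex::FromHex;
/// use zebra_chain::work::difficulty::CompactDifficulty;
///
/// let work = CompactDifficulty::from_hex("1f07ffff")
///     .expect("valid difficulty")
///     .to_work()
///     .expect("valid work");
/// assert_eq!(work.as_u128(), 1 << 13);
/// ```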
#[derive(Clone, Copy, Default, Eq, PartialEq, Ord, PartialOrd)]
pub struct Work(u128);

impl Work {
    /// Returns a value representing no work.
    pub fn zero() -> Self {
        Self(0)
    }

    /// Return the inner `u128` value.
    pub fn as_u128(self) -> u128 {
        self.0
    }
}

impl fmt::Debug for Work {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // There isn't a standard way to show different representations of the
        // same value
        f.debug_tuple("Work")
            // Use hex, because expanded difficulty is in hex.
            .field(&format_args!("{:#x}", self.0))
            // Use decimal, to compare with zcashd
            .field(&format_args!("{}", self.0))
            // Use log2, to compare with zcashd
            .field(&format_args!("{:.5}", (self.0 as f64).log2()))
            .finish()
    }
}

impl CompactDifficulty {
    /// CompactDifficulty exponent base.
    const BASE: u32 = 256;

    /// CompactDifficulty exponent offset.
    const OFFSET: i32 = 3;

    /// CompactDifficulty floating-point precision.
    const PRECISION: u32 = 24;

    /// CompactDifficulty sign bit, part of the signed mantissa.
    const SIGN_BIT: u32 = 1 << (CompactDifficulty::PRECISION - 1);

    /// CompactDifficulty unsigned mantissa mask.
    ///
    /// Also the maximum unsigned mantissa value.
    const UNSIGNED_MANTISSA_MASK: u32 = CompactDifficulty::SIGN_BIT - 1;

    /// Calculate the ExpandedDifficulty for a compact representation.
    ///
    /// See `ToTarget()` in the Zcash Specification, and `CheckProofOfWork()` in
    /// zcashd:
    /// <https://zips.z.cash/protocol/protocol.pdf#nbits>
    ///
    /// Returns None for negative, zero, and overflow values. (zcashd rejects
    /// these values, before comparing the hash.)
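    ///
    /// # Example
    ///
    /// A sketch of accepted and rejected encodings, using illustrative values:
    ///
    /// ```
    /// use hex::FromHex;
    /// use zebra_chain::work::difficulty::{CompactDifficulty, INVALID_COMPACT_DIFFICULTY};
    ///
    /// // A valid "compact bits" value expands to a non-zero threshold.
    /// let valid = CompactDifficulty::from_hex("1d00ffff").expect("valid difficulty");
    /// assert!(valid.to_expanded().is_some());
    ///
    /// // The all-ones test constant has its sign bit set, so it is rejected.
    /// assert!(INVALID_COMPACT_DIFFICULTY.to_expanded().is_none());
    /// ```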
    #[allow(clippy::unwrap_in_result)]
    pub fn to_expanded(self) -> Option<ExpandedDifficulty> {
        // The constants for this floating-point representation.
        // Alias the struct constants here, so the code is easier to read.
        const BASE: u32 = CompactDifficulty::BASE;
        const OFFSET: i32 = CompactDifficulty::OFFSET;
        const PRECISION: u32 = CompactDifficulty::PRECISION;
        const SIGN_BIT: u32 = CompactDifficulty::SIGN_BIT;
        const UNSIGNED_MANTISSA_MASK: u32 = CompactDifficulty::UNSIGNED_MANTISSA_MASK;

        // Negative values in this floating-point representation.
        // 0 if (x & 2^23 == 2^23)
        // zcashd rejects negative values without comparing the hash.
        if self.0 & SIGN_BIT == SIGN_BIT {
            return None;
        }

        // The components of the result
        // The fractional part of the floating-point number
        // x & (2^23 - 1)
        let mantissa = self.0 & UNSIGNED_MANTISSA_MASK;

        // The exponent for the multiplier in the floating-point number
        // 256^(floor(x/(2^24)) - 3)
        //
        // The i32 conversion is safe, because we've just divided self by 2^24.
        let exponent = i32::try_from(self.0 >> PRECISION).expect("fits in i32") - OFFSET;

        // Normalise the mantissa and exponent before multiplying.
        //
        // zcashd rejects non-zero overflow values, but accepts overflows where
        // all the overflowing bits are zero. It also allows underflows.
        let (mantissa, exponent) = match (mantissa, exponent) {
            // Overflow: check for non-zero overflow bits
            //
            // If m is non-zero, overflow. If m is zero, invalid.
            (_, e) if (e >= 32) => return None,
            // If m is larger than the remaining bytes, overflow.
            // Otherwise, avoid overflows in base^exponent.
            (m, e) if (e == 31 && m > u8::MAX.into()) => return None,
            (m, e) if (e == 31 && m <= u8::MAX.into()) => (m << 16, e - 2),
            (m, e) if (e == 30 && m > u16::MAX.into()) => return None,
            (m, e) if (e == 30 && m <= u16::MAX.into()) => (m << 8, e - 1),

            // Underflow: perform the right shift.
            // The abs is safe, because we've just divided by 2^24, and offset
            // is small.
            (m, e) if (e < 0) => (m >> ((e.abs() * 8) as u32), 0),
            (m, e) => (m, e),
        };

        // Now calculate the result: mantissa*base^exponent
        // Earlier code should make sure all these values are in range.
        let mantissa: U256 = mantissa.into();
        let base: U256 = BASE.into();
        let exponent: U256 = exponent.into();
        let result = mantissa * base.pow(exponent);

        if result == U256::zero() {
            // zcashd rejects zero values, without comparing the hash
            None
        } else {
            Some(result.into())
        }
    }

    /// Calculate the Work for a compact representation.
    ///
    /// See `Definition of Work` in the [Zcash Specification], and
    /// `GetBlockProof()` in zcashd.
    ///
    /// Returns None if the corresponding ExpandedDifficulty is None.
    /// Also returns None on Work overflow, which should be impossible on a
    /// valid chain.
    ///
    /// [Zcash Specification]: https://zips.z.cash/protocol/protocol.pdf#workdef
    pub fn to_work(self) -> Option<Work> {
        let expanded = self.to_expanded()?;
        Work::try_from(expanded).ok()
    }

    /// Return the difficulty bytes in big-endian byte-order.
    ///
    /// Zebra displays difficulties in big-endian byte-order,
    /// following the u256 convention set by Bitcoin and zcashd.
    pub fn bytes_in_display_order(&self) -> [u8; 4] {
        self.0.to_be_bytes()
    }

    /// Convert bytes in big-endian byte-order into a [`CompactDifficulty`].
    ///
    /// Zebra displays difficulties in big-endian byte-order,
    /// following the u256 convention set by Bitcoin and zcashd.
    ///
    /// Returns an error if the difficulty value is invalid.
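    ///
    /// # Example
    ///
    /// A round-trip sketch through display-order bytes (the value is illustrative):
    ///
    /// ```
    /// use zebra_chain::work::difficulty::CompactDifficulty;
    ///
    /// let difficulty = CompactDifficulty::from_bytes_in_display_order(&[0x1d, 0x00, 0xff, 0xff])
    ///     .expect("valid difficulty");
    /// assert_eq!(difficulty.bytes_in_display_order(), [0x1d, 0x00, 0xff, 0xff]);
    /// ```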
    pub fn from_bytes_in_display_order(
        bytes_in_display_order: &[u8; 4],
    ) -> Result<CompactDifficulty, BoxError> {
        let internal_byte_order = u32::from_be_bytes(*bytes_in_display_order);

        let difficulty = CompactDifficulty(internal_byte_order);

        if difficulty.to_expanded().is_none() {
            return Err("invalid difficulty value".into());
        }

        Ok(difficulty)
    }

    /// Returns a floating-point number representing a difficulty as a multiple
    /// of the minimum difficulty for the provided network.
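    ///
    /// # Example
    ///
    /// A sketch of the expected scale: the target difficulty limit itself is,
    /// by definition, at `1.0` times the minimum difficulty.
    ///
    /// ```
    /// use zebra_chain::parameters::Network;
    /// use zebra_chain::work::difficulty::ParameterDifficulty;
    ///
    /// let pow_limit = Network::Mainnet.target_difficulty_limit().to_compact();
    /// assert_eq!(pow_limit.relative_to_network(&Network::Mainnet), 1.0);
    /// ```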
    // Copied from <https://github.com/zcash/zcash/blob/99ad6fdc3a549ab510422820eea5e5ce9f60a5fd/src/rpc/blockchain.cpp#L34-L74>
    // TODO: Explain here what this ported code is doing and why, request help to do so with the ECC team.
    pub fn relative_to_network(&self, network: &Network) -> f64 {
        let network_difficulty = network.target_difficulty_limit().to_compact();

        let [mut n_shift, ..] = self.0.to_be_bytes();
        let [n_shift_amount, ..] = network_difficulty.0.to_be_bytes();
        let mut d_diff = f64::from(network_difficulty.0 << 8) / f64::from(self.0 << 8);

        while n_shift < n_shift_amount {
            d_diff *= 256.0;
            n_shift += 1;
        }

        while n_shift > n_shift_amount {
            d_diff /= 256.0;
            n_shift -= 1;
        }

        d_diff
    }
}

impl fmt::Debug for CompactDifficulty {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // There isn't a standard way to show different representations of the
        // same value
        f.debug_tuple("CompactDifficulty")
            // Use hex, because it's a float
            .field(&format_args!("{:#010x}", self.0))
            // Use expanded difficulty, for bitwise difficulty comparisons
            .field(&format_args!("{:?}", self.to_expanded()))
            .finish()
    }
}

impl fmt::Display for CompactDifficulty {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_str(&self.encode_hex::<String>())
    }
}

impl ToHex for &CompactDifficulty {
    fn encode_hex<T: FromIterator<char>>(&self) -> T {
        self.bytes_in_display_order().encode_hex()
    }

    fn encode_hex_upper<T: FromIterator<char>>(&self) -> T {
        self.bytes_in_display_order().encode_hex_upper()
    }
}

impl ToHex for CompactDifficulty {
    fn encode_hex<T: FromIterator<char>>(&self) -> T {
        (&self).encode_hex()
    }

    fn encode_hex_upper<T: FromIterator<char>>(&self) -> T {
        (&self).encode_hex_upper()
    }
}

impl FromHex for CompactDifficulty {
    type Error = BoxError;

    fn from_hex<T: AsRef<[u8]>>(hex: T) -> Result<Self, Self::Error> {
        let bytes_in_display_order = <[u8; 4]>::from_hex(hex)?;

        CompactDifficulty::from_bytes_in_display_order(&bytes_in_display_order)
    }
}

impl TryFrom<ExpandedDifficulty> for Work {
    type Error = ();

    fn try_from(expanded: ExpandedDifficulty) -> Result<Self, Self::Error> {
        // Consensus:
        //
        // <https://zips.z.cash/protocol/protocol.pdf#workdef>
        //
        // We need to compute `2^256 / (expanded + 1)`, but we can't represent
        // 2^256, as it's too large for a u256. However, as 2^256 is at least as
        // large as `expanded + 1`, it is equal to
        // `((2^256 - expanded - 1) / (expanded + 1)) + 1`, or
        // `(!expanded / (expanded + 1)) + 1`, because `!expanded` is
        // `2^256 - 1 - expanded` for a 256-bit integer.
        let result = (!expanded.0 / (expanded.0 + 1)) + 1;
        if result <= u128::MAX.into() {
            Ok(Work(result.as_u128()))
        } else {
            Err(())
        }
    }
}

impl From<ExpandedDifficulty> for CompactDifficulty {
    fn from(value: ExpandedDifficulty) -> Self {
        value.to_compact()
    }
}

impl ExpandedDifficulty {
    /// Returns the difficulty of the hash.
    ///
    /// Used to implement comparisons between difficulties and hashes.
    ///
    /// Usage:
    ///
    /// Compare the hash with the calculated difficulty value, using Rust's
    /// standard comparison operators.
    ///
    /// Hashes are not used to calculate the difficulties of future blocks, so
    /// users of this module should avoid converting hashes into difficulties.
    pub(super) fn from_hash(hash: &block::Hash) -> ExpandedDifficulty {
        U256::from_little_endian(&hash.0).into()
    }

    /// Calculate the CompactDifficulty for an expanded difficulty.
    ///
    /// # Consensus
    ///
    /// See `ToCompact()` in the Zcash Specification, and `GetCompact()`
    /// in zcashd:
    /// <https://zips.z.cash/protocol/protocol.pdf#nbits>
    ///
    /// # Panics
    ///
    /// If `self` is zero.
    ///
    /// `ExpandedDifficulty` values are generated in two ways:
    /// * conversion from `CompactDifficulty` values, which rejects zeroes, and
    /// * difficulty adjustment calculations, which impose a non-zero minimum
    ///   `target_difficulty_limit`.
    ///
    /// Neither of these methods yields zero values.
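    ///
    /// # Example
    ///
    /// A sketch of a lossless round trip: an expanded value that came from a
    /// compact encoding converts back to the same encoding (the value is
    /// illustrative):
    ///
    /// ```
    /// use hex::FromHex;
    /// use zebra_chain::work::difficulty::CompactDifficulty;
    ///
    /// let compact = CompactDifficulty::from_hex("1d00ffff").expect("valid difficulty");
    /// let expanded = compact.to_expanded().expect("valid threshold");
    /// assert_eq!(expanded.to_compact(), compact);
    /// ```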
    pub fn to_compact(self) -> CompactDifficulty {
        // The zcashd implementation supports negative and zero compact values.
        // These values are rejected by the protocol rules. Zebra is designed so
        // that invalid states are not representable. Therefore, this function
        // does not produce negative compact values, and panics on zero compact
        // values. (The negative compact value code in zcashd is unused.)
        assert!(self.0 > 0.into(), "Zero difficulty values are invalid");

        // The constants for this floating-point representation.
        // Alias the constants here, so the code is easier to read.
        const UNSIGNED_MANTISSA_MASK: u32 = CompactDifficulty::UNSIGNED_MANTISSA_MASK;
        const OFFSET: i32 = CompactDifficulty::OFFSET;

        // Calculate the final size, accounting for the sign bit.
        // This is the size *after* applying the sign bit adjustment in `ToCompact()`.
        let size = self.0.bits() / 8 + 1;

        // Make sure the mantissa is non-negative, by shifting down values that
        // would otherwise overflow into the sign bit
        let mantissa = if self.0 <= UNSIGNED_MANTISSA_MASK.into() {
            // Value is small, shift up if needed
            self.0 << (8 * (3 - size))
        } else {
            // Value is large, shift down
            self.0 >> (8 * (size - 3))
        };

        // This assertion also makes sure that size fits in its 8 bit compact field
        assert!(
            size < (31 + OFFSET) as _,
            "256^size (256^{size}) must fit in a u256, after the sign bit adjustment and offset"
        );
        let size = u32::try_from(size).expect("a 0-6 bit value fits in a u32");

        assert!(
            mantissa <= UNSIGNED_MANTISSA_MASK.into(),
            "mantissa {mantissa:x?} must fit in its compact field"
        );
        let mantissa = u32::try_from(mantissa).expect("a 0-23 bit value fits in a u32");

        if mantissa > 0 {
            CompactDifficulty(mantissa + (size << 24))
        } else {
            // This check catches invalid mantissas. Overflows and underflows
            // should also be unreachable, but they aren't caught here.
            unreachable!("converted CompactDifficulty values must be valid")
        }
    }

    /// Return the difficulty bytes in big-endian byte-order,
    /// suitable for printing out byte by byte.
    ///
    /// Zebra displays difficulties in big-endian byte-order,
    /// following the u256 convention set by Bitcoin and zcashd.
    pub fn bytes_in_display_order(&self) -> [u8; 32] {
        self.0.to_big_endian()
    }

    /// Convert bytes in big-endian byte-order into an [`ExpandedDifficulty`].
    ///
    /// Zebra displays difficulties in big-endian byte-order,
    /// following the u256 convention set by Bitcoin and zcashd.
    ///
    /// Preserves the exact difficulty value represented by the bytes,
    /// even if it can't be generated from a [`CompactDifficulty`].
    /// This means a round-trip conversion to [`CompactDifficulty`] can be lossy.
    pub fn from_bytes_in_display_order(bytes_in_display_order: &[u8; 32]) -> ExpandedDifficulty {
        let internal_byte_order = U256::from_big_endian(bytes_in_display_order);

        ExpandedDifficulty(internal_byte_order)
    }
}

impl fmt::Display for ExpandedDifficulty {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_str(&self.encode_hex::<String>())
    }
}

impl fmt::Debug for ExpandedDifficulty {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_tuple("ExpandedDifficulty")
            .field(&self.encode_hex::<String>())
            .finish()
    }
}

impl ToHex for &ExpandedDifficulty {
    fn encode_hex<T: FromIterator<char>>(&self) -> T {
        self.bytes_in_display_order().encode_hex()
    }

    fn encode_hex_upper<T: FromIterator<char>>(&self) -> T {
        self.bytes_in_display_order().encode_hex_upper()
    }
}

impl ToHex for ExpandedDifficulty {
    fn encode_hex<T: FromIterator<char>>(&self) -> T {
        (&self).encode_hex()
    }

    fn encode_hex_upper<T: FromIterator<char>>(&self) -> T {
        (&self).encode_hex_upper()
    }
}

impl FromHex for ExpandedDifficulty {
    type Error = <[u8; 32] as FromHex>::Error;

    fn from_hex<T: AsRef<[u8]>>(hex: T) -> Result<Self, Self::Error> {
        let bytes_in_display_order = <[u8; 32]>::from_hex(hex)?;

        Ok(ExpandedDifficulty::from_bytes_in_display_order(
            &bytes_in_display_order,
        ))
    }
}

impl From<U256> for ExpandedDifficulty {
    fn from(value: U256) -> Self {
        ExpandedDifficulty(value)
    }
}

impl From<ExpandedDifficulty> for U256 {
    fn from(value: ExpandedDifficulty) -> Self {
        value.0
    }
}

impl Sum<ExpandedDifficulty> for ExpandedDifficulty {
    fn sum<I: Iterator<Item = ExpandedDifficulty>>(iter: I) -> Self {
        iter.map(|d| d.0).fold(U256::zero(), Add::add).into()
    }
}

impl<T> Div<T> for ExpandedDifficulty
where
    T: Into<U256>,
{
    type Output = ExpandedDifficulty;

    fn div(self, rhs: T) -> Self::Output {
        ExpandedDifficulty(self.0 / rhs)
    }
}

impl<T> Mul<T> for ExpandedDifficulty
where
    U256: Mul<T>,
    <U256 as Mul<T>>::Output: Into<U256>,
{
    type Output = ExpandedDifficulty;

    fn mul(self, rhs: T) -> ExpandedDifficulty {
        ExpandedDifficulty((self.0 * rhs).into())
    }
}

impl PartialEq<block::Hash> for ExpandedDifficulty {
    /// Is `self` equal to `other`?
    ///
    /// See `partial_cmp` for details.
    fn eq(&self, other: &block::Hash) -> bool {
        self.partial_cmp(other) == Some(Ordering::Equal)
    }
}

impl PartialOrd<block::Hash> for ExpandedDifficulty {
    /// # Consensus
    ///
    /// `block::Hash`es are compared with `ExpandedDifficulty` thresholds by
    /// converting the hash to a 256-bit integer in little-endian order.
    ///
    /// Greater values represent *less* work. This matches the convention in
    /// zcashd and bitcoin.
    ///
    /// <https://zips.z.cash/protocol/protocol.pdf#workdef>
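    ///
    /// # Example
    ///
    /// A sketch of the difficulty filter comparison. The all-zero hash used
    /// here is illustrative: it trivially satisfies any threshold, while real
    /// hashes come from block headers.
    ///
    /// ```
    /// use zebra_chain::{block, work::difficulty::ExpandedDifficulty};
    ///
    /// let threshold = ExpandedDifficulty::from_bytes_in_display_order(&[0xff; 32]);
    /// let hash = block::Hash([0x00; 32]);
    ///
    /// assert!(threshold >= hash);
    /// ```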
    fn partial_cmp(&self, other: &block::Hash) -> Option<Ordering> {
        self.partial_cmp(&ExpandedDifficulty::from_hash(other))
    }
}

impl PartialEq<ExpandedDifficulty> for block::Hash {
    /// Is `self` equal to `other`?
    ///
    /// See `<ExpandedDifficulty as PartialOrd<block::Hash>>::partial_cmp`
    /// for details.
    fn eq(&self, other: &ExpandedDifficulty) -> bool {
        other.eq(self)
    }
}

impl PartialOrd<ExpandedDifficulty> for block::Hash {
    /// How does `self` compare to `other`?
    ///
    /// # Consensus
    ///
    /// See `<ExpandedDifficulty as PartialOrd<block::Hash>>::partial_cmp`
    /// for details.
    #[allow(clippy::unwrap_in_result)]
    fn partial_cmp(&self, other: &ExpandedDifficulty) -> Option<Ordering> {
        Some(
            // Use the canonical implementation, but reverse the order
            other
                .partial_cmp(self)
                .expect("difficulties and hashes have a total order")
                .reverse(),
        )
    }
}

impl std::ops::Add for Work {
    type Output = PartialCumulativeWork;

    fn add(self, rhs: Work) -> PartialCumulativeWork {
        PartialCumulativeWork::from(self) + rhs
    }
}

/// Partial work used to track relative work in non-finalized chains
///
/// # Consensus
///
/// Used to choose the best chain with the most work.
///
/// Since it is only relative values that matter, Zebra uses the partial work from a shared
/// fork root block to find the best chain.
///
/// See [`Work`] for details.
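///
/// # Example
///
/// A sketch of accumulating relative work along a fork, with an illustrative
/// difficulty value:
///
/// ```
/// use hex::FromHex;
/// use zebra_chain::work::difficulty::{CompactDifficulty, PartialCumulativeWork};
///
/// let block_work = CompactDifficulty::from_hex("1f07ffff")
///     .expect("valid difficulty")
///     .to_work()
///     .expect("valid work");
///
/// let mut fork_work = PartialCumulativeWork::zero();
/// fork_work += block_work;
/// fork_work += block_work;
///
/// assert_eq!(fork_work.as_u128(), 2 * block_work.as_u128());
/// ```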
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, PartialOrd, Ord)]
pub struct PartialCumulativeWork(u128);

impl PartialCumulativeWork {
    /// Returns a value representing no work.
    pub fn zero() -> Self {
        Self(0)
    }

    /// Return the inner `u128` value.
    pub fn as_u128(self) -> u128 {
        self.0
    }

    /// Returns a floating-point work multiplier that can be used for display.
    /// The returned value is the work as a multiple of the target difficulty limit for `network`.
    pub fn difficulty_multiplier_for_display(&self, network: Network) -> f64 {
        // This calculation is similar to the `getdifficulty` RPC, see that code for details.

        let pow_limit = network
            .target_difficulty_limit()
            .to_compact()
            .to_work()
            .expect("target difficulty limit is valid work");

        // Convert to u128 then f64.
        let pow_limit = pow_limit.as_u128() as f64;
        let work = self.as_u128() as f64;

        work / pow_limit
    }

    /// Returns floating-point work bits that can be used for display.
    /// The returned value is the number of hash bits represented by the work.
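    ///
    /// # Example
    ///
    /// A sketch using an illustrative single-block work value of `2^13`
    /// (the work for the mainnet `PoWLimit` encoding), which displays as 13 bits:
    ///
    /// ```
    /// use hex::FromHex;
    /// use zebra_chain::work::difficulty::{CompactDifficulty, PartialCumulativeWork};
    ///
    /// let work = CompactDifficulty::from_hex("1f07ffff")
    ///     .expect("valid difficulty")
    ///     .to_work()
    ///     .expect("valid work");
    ///
    /// let partial_work = PartialCumulativeWork::from(work);
    /// assert!((partial_work.difficulty_bits_for_display() - 13.0).abs() < 1e-9);
    /// ```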
    pub fn difficulty_bits_for_display(&self) -> f64 {
        // This calculation is similar to `zcashd`'s bits display in its logs.

        // Convert to u128 then f64.
        let work = self.as_u128() as f64;

        work.log2()
    }
}

/// Network methods related to Difficulty
pub trait ParameterDifficulty {
    /// Returns the easiest target difficulty allowed on `network`.
    ///
    /// # Consensus
    ///
    /// See `PoWLimit` in the Zcash specification:
    /// <https://zips.z.cash/protocol/protocol.pdf#constants>
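    ///
    /// # Example
    ///
    /// A sketch for Mainnet, whose `PoWLimit` is `2^243 - 1`; after rounding
    /// through its compact encoding, this is the well-known `nBits` value
    /// `0x1f07ffff`:
    ///
    /// ```
    /// use zebra_chain::parameters::Network;
    /// use zebra_chain::work::difficulty::ParameterDifficulty;
    ///
    /// let limit = Network::Mainnet.target_difficulty_limit();
    /// assert_eq!(limit.to_compact().bytes_in_display_order(), [0x1f, 0x07, 0xff, 0xff]);
    /// ```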
    fn target_difficulty_limit(&self) -> ExpandedDifficulty;
}

impl ParameterDifficulty for Network {
    /// Returns the easiest target difficulty allowed on `network`.
    /// See [`ParameterDifficulty::target_difficulty_limit`]
    fn target_difficulty_limit(&self) -> ExpandedDifficulty {
        let limit: U256 = match self {
            // Mainnet PoWLimit is defined as `2^243 - 1` on page 73 of the protocol specification:
            // <https://zips.z.cash/protocol/protocol.pdf>
            Network::Mainnet => (U256::one() << 243) - 1,
            // 2^251 - 1 for the default testnet, see `testnet::ParametersBuilder::default()`
            Network::Testnet(params) => return params.target_difficulty_limit(),
        };

        // `zcashd` converts the PoWLimit into a compact representation before
        // using it to perform difficulty filter checks.
        //
        // The Zcash specification converts to compact for the default difficulty
        // filter, but not for testnet minimum difficulty blocks. (ZIP 205 and
        // ZIP 208 don't specify this conversion either.) See #1277 for details.
        ExpandedDifficulty(limit)
            .to_compact()
            .to_expanded()
            .expect("difficulty limits are valid expanded values")
    }
}

impl From<Work> for PartialCumulativeWork {
    fn from(work: Work) -> Self {
        PartialCumulativeWork(work.0)
    }
}

impl std::ops::Add<Work> for PartialCumulativeWork {
    type Output = PartialCumulativeWork;

    fn add(self, rhs: Work) -> Self::Output {
        let result = self
            .0
            .checked_add(rhs.0)
            .expect("Work values do not overflow");

        PartialCumulativeWork(result)
    }
}

impl std::ops::AddAssign<Work> for PartialCumulativeWork {
    fn add_assign(&mut self, rhs: Work) {
        *self = *self + rhs;
    }
}

impl std::ops::Sub<Work> for PartialCumulativeWork {
    type Output = PartialCumulativeWork;

    fn sub(self, rhs: Work) -> Self::Output {
        let result = self.0
            .checked_sub(rhs.0)
            .expect("PartialCumulativeWork values do not underflow: all subtracted Work values must have been previously added to the PartialCumulativeWork");

        PartialCumulativeWork(result)
    }
}

impl std::ops::SubAssign<Work> for PartialCumulativeWork {
    fn sub_assign(&mut self, rhs: Work) {
        *self = *self - rhs;
    }
}