binius_core/merkle_tree/scheme.rs

// Copyright 2024-2025 Irreducible Inc.

use std::{array, fmt::Debug, marker::PhantomData};

use binius_field::TowerField;
use binius_hash::{PseudoCompressionFunction, hash_serialize};
use binius_utils::{
	bail,
	checked_arithmetics::{log2_ceil_usize, log2_strict_usize},
};
use bytes::Buf;
use digest::{Digest, Output, core_api::BlockSizeUser};
use getset::Getters;

use super::{
	errors::{Error, VerificationError},
	merkle_tree_vcs::MerkleTreeScheme,
};
use crate::transcript::TranscriptReader;

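/// Binary Merkle tree commitment scheme over field elements, parameterized by a leaf hasher `H`
/// and a 2-to-1 digest compression function `C`.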
#[derive(Debug, Getters)]
pub struct BinaryMerkleTreeScheme<T, H, C> {
	#[getset(get = "pub")]
	compression: C,
	// This makes it so that `BinaryMerkleTreeScheme` remains Send + Sync
	// See https://doc.rust-lang.org/nomicon/phantom-data.html#table-of-phantomdata-patterns
	_phantom: PhantomData<fn() -> (T, H)>,
}

impl<T, H, C> BinaryMerkleTreeScheme<T, H, C> {
	pub fn new(compression: C) -> Self {
		Self {
			compression,
			_phantom: PhantomData,
		}
	}
}

impl<F, H, C> MerkleTreeScheme<F> for BinaryMerkleTreeScheme<F, H, C>
where
	F: TowerField,
	H: Digest + BlockSizeUser,
	C: PseudoCompressionFunction<Output<H>, 2> + Sync,
{
	type Digest = Output<H>;

	/// Returns the layer depth that minimizes proof size.
	///
	/// Moving one level deeper saves one branch digest per query but doubles the number of
	/// digests in the committed layer, so the optimum is the smallest depth whose layer has at
	/// least `n_queries` nodes, i.e. `ceil(log2(n_queries))`, capped at `tree_depth`.
	fn optimal_verify_layer(&self, n_queries: usize, tree_depth: usize) -> usize {
		log2_ceil_usize(n_queries).min(tree_depth)
	}

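	/// Size in bytes of an opening proof for `n_queries` queries: `log2(len) - layer_depth`
	/// branch digests per query, plus the `2^layer_depth` digests of the partially verified
	/// layer itself.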
	fn proof_size(&self, len: usize, n_queries: usize, layer_depth: usize) -> Result<usize, Error> {
		if !len.is_power_of_two() {
			bail!(Error::PowerOfTwoLengthRequired)
		}

		let log_len = log2_strict_usize(len);

		if layer_depth > log_len {
			bail!(Error::IncorrectLayerDepth)
		}

		Ok(((log_len - layer_depth) * n_queries + (1 << layer_depth))
			* <H as Digest>::output_size())
	}

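	// Recomputes the root from the full committed vector: each `batch_size` chunk is hashed to a
	// leaf digest and the resulting digests are folded pairwise up to the root.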
	fn verify_vector(
		&self,
		root: &Self::Digest,
		data: &[F],
		batch_size: usize,
	) -> Result<(), Error> {
		if data.len() % batch_size != 0 {
			bail!(Error::IncorrectBatchSize);
		}

		let mut digests = data
			.chunks(batch_size)
			.map(|chunk| {
				hash_serialize::<F, H>(chunk)
					.expect("values are of TowerField type which we expect to be serializable")
			})
			.collect::<Vec<_>>();

		fold_digests_vector_inplace(&self.compression, &mut digests)?;
		if digests[0] != *root {
			bail!(VerificationError::InvalidProof)
		}
		Ok(())
	}

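	// Recomputes the root from a complete layer of `2^layer_depth` intermediate digests and
	// compares it against the claimed root.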
	fn verify_layer(
		&self,
		root: &Self::Digest,
		layer_depth: usize,
		layer_digests: &[Self::Digest],
	) -> Result<(), Error> {
		if 1 << layer_depth != layer_digests.len() {
			bail!(VerificationError::IncorrectVectorLength)
		}

		let mut digests = layer_digests.to_owned();

		fold_digests_vector_inplace(&self.compression, &mut digests)?;

		if digests[0] != *root {
			bail!(VerificationError::InvalidProof)
		}
		Ok(())
	}

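	// Verifies a single opening by hashing the claimed values to a leaf digest and walking the
	// authentication path up to the verified layer. The parity of `index` at each level decides
	// whether the current digest is the left or right child; shifting `index` right once per
	// level leaves the node's position within the layer at depth `layer_depth`.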
	fn verify_opening<B: Buf>(
		&self,
		mut index: usize,
		values: &[F],
		layer_depth: usize,
		tree_depth: usize,
		layer_digests: &[Self::Digest],
		proof: &mut TranscriptReader<B>,
	) -> Result<(), Error> {
		if (1 << layer_depth) != layer_digests.len() {
			bail!(VerificationError::IncorrectVectorLength);
		}

		if index >= (1 << tree_depth) {
			bail!(Error::IndexOutOfRange {
				max: (1 << tree_depth) - 1
			});
		}

		let mut leaf_digest = hash_serialize::<F, H>(values)
			.expect("values are of TowerField type which we expect to be serializable");
		for branch_node in proof.read_vec(tree_depth - layer_depth)? {
			leaf_digest = self.compression.compress(if index & 1 == 0 {
				[leaf_digest, branch_node]
			} else {
				[branch_node, leaf_digest]
			});
			index >>= 1;
		}

		(leaf_digest == layer_digests[index])
			.then_some(())
			.ok_or_else(|| VerificationError::InvalidProof.into())
	}
}

// Merkle-tree-like folding: repeatedly compresses adjacent pairs of digests in place until the
// root digest is left at index 0. After each pass the parents occupy the prefix of the slice;
// entries beyond the active prefix are left untouched.
fn fold_digests_vector_inplace<C, D>(compression: &C, digests: &mut [D]) -> Result<(), Error>
where
	C: PseudoCompressionFunction<D, 2> + Sync,
	D: Clone + Default + Send + Sync + Debug,
{
	if !digests.len().is_power_of_two() {
		bail!(Error::PowerOfTwoLengthRequired);
	}

	let mut len = digests.len() / 2;

	while len != 0 {
		for i in 0..len {
			digests[i] = compression.compress(array::from_fn(|j| digests[2 * i + j].clone()));
		}
		len /= 2;
	}

	Ok(())
}
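
// A minimal sketch of the in-place folding behaviour, assuming `PseudoCompressionFunction<T, 2>`
// requires only a `compress([T; 2]) -> T` method (as used above); the `XorCompression` helper is
// hypothetical test scaffolding, not part of the crate.
#[cfg(test)]
mod tests {
	use super::*;

	#[derive(Clone)]
	struct XorCompression;

	impl PseudoCompressionFunction<u64, 2> for XorCompression {
		fn compress(&self, data: [u64; 2]) -> u64 {
			data[0] ^ data[1]
		}
	}

	#[test]
	fn fold_reduces_to_single_root_digest() {
		// Pairwise XOR-folding [1, 2, 3, 4] leaves (1 ^ 2) ^ (3 ^ 4) at index 0.
		let mut digests = vec![1u64, 2, 3, 4];
		fold_digests_vector_inplace(&XorCompression, &mut digests).unwrap();
		assert_eq!(digests[0], 1u64 ^ 2 ^ 3 ^ 4);

		// Non-power-of-two lengths are rejected.
		let mut digests = vec![1u64, 2, 3];
		assert!(fold_digests_vector_inplace(&XorCompression, &mut digests).is_err());
	}
}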