binius_ntt/
additive_ntt.rs

// Copyright 2024-2025 Irreducible Inc.

use binius_field::{BinaryField, ExtensionField, PackedExtension, PackedField};
use binius_math::BinarySubspace;

use super::error::Error;

/// Shape of a batched NTT operation on a 3-dimensional tensor.
///
/// The tensor has three dimensions. Elements are indexed first along axis X (width),
/// then axis Y (height), then axis Z (depth). In other words, the range of elements sharing Y and
/// Z coordinates is contiguous in memory, elements sharing X and Z coordinates are strided by the
/// tensor width, and elements sharing X and Y coordinates are strided by the width times height.
///
/// The tensor has power-of-two length in each dimension.
///
/// The NTT operation performs a parallel NTT along the _Y axis_, meaning the operation transforms
/// each column independently.
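///
/// For example, under this layout the scalar at coordinates `(x, y, z)` lives at flat index
/// `x + (y << log_x) + (z << (log_x + log_y))`. A small sketch of the index arithmetic:
///
/// ```
/// // With log_x = 1, log_y = 2, log_z = 1 (width 2, height 4, depth 2), the element at
/// // coordinates (x, y, z) = (1, 3, 1) is the last of the 16 scalars, at flat index 15.
/// let (log_x, log_y) = (1, 2);
/// let (x, y, z) = (1, 3, 1);
/// assert_eq!(x + (y << log_x) + (z << (log_x + log_y)), 15);
/// ```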
#[derive(Debug, Default, Clone, Copy)]
pub struct NTTShape {
	/// Log length along the X axis (width). This is the interleaved batch size.
	pub log_x: usize,
	/// Log length along the Y axis (height). This is the size of the NTT transform.
	pub log_y: usize,
	/// Log length along the Z axis (depth). This is the consecutive batch size.
	pub log_z: usize,
}

/// The binary field additive NTT.
///
/// A number-theoretic transform (NTT) is a linear transformation on a finite field analogous to
/// the discrete Fourier transform. The version of the additive NTT we use is originally described
/// in [LCH14]. In [DP24] Section 3.1, the authors present the LCH additive NTT algorithm in a way
/// that makes apparent its compatibility with the FRI proximity test. Throughout the
/// documentation, we will refer to the notation used in [DP24].
///
/// The additive NTT is parameterized by a binary field $K$ and an $\mathbb{F}\_2$-linear subspace.
/// We write $\beta_0, \ldots, \beta_{\ell-1}$ for the ordered basis elements of the subspace and
/// require $\beta_0 = 1$. The basis determines a novel polynomial basis and an evaluation domain.
/// In the forward direction, the additive NTT transforms a vector of polynomial coefficients,
/// with respect to the novel polynomial basis, into a vector of their evaluations over the
/// evaluation domain. The inverse transformation interpolates polynomial values over the domain
/// into novel polynomial basis coefficients.
///
/// An [`AdditiveNTT`] implementation with a maximum domain dimension of $\ell$ can be applied on
/// a sequence of $\ell + 1$ evaluation domains of sizes $2^0, \ldots, 2^\ell$. These are the
/// domains $S^{(\ell)}, S^{(\ell - 1)}, \ldots, S^{(0)}$ defined in [DP24] Section 4. The methods
/// [`Self::forward_transform`] and [`Self::inverse_transform`] determine which of the $S^{(i)}$
/// domains to use for the transformation's evaluation domain and novel polynomial basis from
/// their `shape` and `coset_bits` parameters. (Remember, the novel polynomial basis is itself
/// parameterized by the subspace basis.) **Counterintuitively, the field subspace $S^{(i+1)}$ is
/// not necessarily a subset of $S^{(i)}$**. We choose this behavior for the [`AdditiveNTT`] trait
/// because it facilitates compatibility with FRI when batching proximity tests for codewords of
/// different dimensions.
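///
/// A minimal round-trip sketch (`SingleThreadedNTT` is assumed here as an illustrative
/// implementation; the constructor and setup are not part of this trait):
///
/// ```ignore
/// use binius_field::BinaryField16b;
///
/// // An NTT supporting domains of size up to 2^8 (illustrative constructor).
/// let ntt = SingleThreadedNTT::<BinaryField16b>::new(8)?;
/// // `data` is a mutable slice of packed field elements covering 2^6 scalars.
/// let shape = NTTShape { log_x: 0, log_y: 6, log_z: 0 };
/// // The forward transform evaluates the message over coset 0 of the dimension-6 domain;
/// // the inverse transform on the same coset recovers the coefficients.
/// ntt.forward_transform(&mut data, shape, 0, 0, 0)?;
/// ntt.inverse_transform(&mut data, shape, 0, 0, 0)?;
/// ```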
///
/// [LCH14]: <https://arxiv.org/abs/1404.3458>
/// [DP24]: <https://eprint.iacr.org/2024/504>
pub trait AdditiveNTT<F: BinaryField> {
	/// Base-2 logarithm of the maximum size of the NTT domain, $\ell$.
	fn log_domain_size(&self) -> usize;

	/// Returns the binary subspace $S^{(i)}$.
	///
	/// The domain will have dimension $\ell - i$.
	///
	/// ## Preconditions
	///
	/// * `i` must be less than `self.log_domain_size()`
	fn subspace(&self, i: usize) -> BinarySubspace<F>;

	/// Get the normalized subspace polynomial evaluation $\hat{W}_i(\beta_j)$.
	///
	/// ## Preconditions
	///
	/// * `i` must be less than `self.log_domain_size()`
	/// * `j` must be less than `self.log_domain_size() - i`
	fn get_subspace_eval(&self, i: usize, j: usize) -> F;

	/// Batched forward transformation defined in [DP24], Section 2.3.
	///
	/// The scalars of `data`, viewed in natural order, represent a tensor of `shape` dimensions.
	/// See [`NTTShape`] for layout details. The transform is in-place; the output adheres to
	/// `shape`.
	///
	/// The `coset` specifies the evaluation domain as an indexed coset of the subspace. The coset
	/// index must be representable in `coset_bits` bits. The evaluation domain of the NTT is
	/// chosen to be the subspace $S^{(i)}$ with dimension `shape.log_y + coset_bits`. Choosing the
	/// NTT domain in this way ensures consistency of the domains when using FRI with different
	/// codeword lengths.
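	///
	/// For example (an illustrative reading of the parameters, not a prescribed calling
	/// convention): with `shape.log_y = 4` and `coset_bits = 2`, the evaluation domain is the
	/// dimension-6 subspace $S^{(i)}$; each `coset` in `0..4` selects one of its four size-16
	/// cosets, and the four calls together evaluate the message over all 64 domain points.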
	///
	/// The transformation accepts a `skip_rounds` parameter, which is a number of NTT layers to
	/// skip at the beginning of the forward transform. This corresponds to parallel NTTs on
	/// multiple adjacent subspace cosets, but beginning with messages interpreted as coefficients
	/// in a modified novel polynomial basis. See [DP24] Remark 4.15.
	///
	/// [DP24]: <https://eprint.iacr.org/2024/504>
	fn forward_transform<P: PackedField<Scalar = F>>(
		&self,
		data: &mut [P],
		shape: NTTShape,
		coset: usize,
		coset_bits: usize,
		skip_rounds: usize,
	) -> Result<(), Error>;

	/// Batched inverse transformation defined in [DP24], Section 2.3.
	///
	/// The scalars of `data`, viewed in natural order, represent a tensor of `shape` dimensions.
	/// See [`NTTShape`] for layout details. The transform is in-place; the output adheres to
	/// `shape`.
	///
	/// The `coset` specifies the evaluation domain as an indexed coset of the subspace. The coset
	/// index must be representable in `coset_bits` bits. The evaluation domain of the NTT is
	/// chosen to be the subspace $S^{(i)}$ with dimension `shape.log_y + coset_bits`. Choosing the
	/// NTT domain in this way ensures consistency of the domains when using FRI with different
	/// codeword lengths.
	///
	/// The transformation accepts a `skip_rounds` parameter, which is a number of NTT layers to
	/// skip at the end of the inverse transform. This corresponds to parallel NTTs on multiple
	/// adjacent subspace cosets, but returning with messages interpreted as coefficients in a
	/// modified novel polynomial basis. See [DP24] Remark 4.15.
	///
	/// [DP24]: <https://eprint.iacr.org/2024/504>
	fn inverse_transform<P: PackedField<Scalar = F>>(
		&self,
		data: &mut [P],
		shape: NTTShape,
		coset: usize,
		coset_bits: usize,
		skip_rounds: usize,
	) -> Result<(), Error>;

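	/// Forward transformation on a tensor of packed extension field elements.
	///
	/// This is a convenience wrapper around [`Self::forward_transform`]: each extension field
	/// scalar is reinterpreted as its run of base field components, widening the X axis of the
	/// shape by `PE::Scalar::LOG_DEGREE`. Since the NTT is a linear transformation over the base
	/// field, transforming the base field components in place is equivalent to transforming the
	/// extension field elements themselves.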
	fn forward_transform_ext<PE: PackedExtension<F>>(
		&self,
		data: &mut [PE],
		shape: NTTShape,
		coset: usize,
		coset_bits: usize,
		skip_rounds: usize,
	) -> Result<(), Error> {
		let shape_ext = NTTShape {
			log_x: shape.log_x + PE::Scalar::LOG_DEGREE,
			..shape
		};
		self.forward_transform(PE::cast_bases_mut(data), shape_ext, coset, coset_bits, skip_rounds)
	}

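	/// Inverse transformation on a tensor of packed extension field elements.
	///
	/// The extension field counterpart of [`Self::inverse_transform`]; see
	/// [`Self::forward_transform_ext`] for how the shape is widened along the X axis.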
	fn inverse_transform_ext<PE: PackedExtension<F>>(
		&self,
		data: &mut [PE],
		shape: NTTShape,
		coset: usize,
		coset_bits: usize,
		skip_rounds: usize,
	) -> Result<(), Error> {
		let shape_ext = NTTShape {
			log_x: shape.log_x + PE::Scalar::LOG_DEGREE,
			..shape
		};
		self.inverse_transform(PE::cast_bases_mut(data), shape_ext, coset, coset_bits, skip_rounds)
	}
}