binius_ntt/additive_ntt.rs

// Copyright 2024-2025 Irreducible Inc.

use binius_field::{BinaryField, ExtensionField, PackedExtension, PackedField};
use binius_math::BinarySubspace;

use super::error::Error;

/// Shape of a batched NTT operation on a 3-dimensional tensor.
///
/// The tensor has three dimensions. Elements are indexed first along axis X (width), then axis Y
/// (height), then axis Z (depth). In other words, elements sharing Y and Z coordinates are
/// contiguous in memory, elements sharing X and Z coordinates are strided by the tensor width,
/// and elements sharing X and Y coordinates are strided by the width times the height.
///
/// The tensor has power-of-two length in each dimension.
///
/// The NTT operation performs a parallel NTT along the _Y axis_, meaning the operation
/// transforms each column independently.
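///
/// For example, the scalar at coordinates `(x, y, z)` lives at flat index
/// `x + (y << log_x) + (z << (log_x + log_y))`. A quick sketch of this indexing rule (the
/// `binius_ntt::NTTShape` import path is assumed here):
///
/// ```
/// # use binius_ntt::NTTShape;
/// // A width-2, height-4, depth-2 tensor of scalars.
/// let shape = NTTShape { log_x: 1, log_y: 2, log_z: 1 };
/// let (x, y, z) = (1, 3, 1);
/// // Elements sharing Y and Z are contiguous; Y is strided by the width, Z by width * height.
/// let index = x + (y << shape.log_x) + (z << (shape.log_x + shape.log_y));
/// assert_eq!(index, 15); // the last of the 2^(1 + 2 + 1) = 16 scalars
/// ```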
#[derive(Debug, Default, Clone, Copy)]
pub struct NTTShape {
	/// Log length along the X axis (width). This is the interleaved batch size.
	pub log_x: usize,
	/// Log length along the Y axis (height). This is the size of the NTT transform.
	pub log_y: usize,
	/// Log length along the Z axis (depth). This is the consecutive batch size.
	pub log_z: usize,
}

/// The binary field additive NTT.
///
/// A number-theoretic transform (NTT) is a linear transformation on a finite field analogous to
/// the discrete Fourier transform. The version of the additive NTT we use is originally described
/// in [LCH14]. In [DP24] Section 3.1, the authors present the LCH additive NTT algorithm in a way
/// that makes apparent its compatibility with the FRI proximity test. Throughout the
/// documentation, we will refer to the notation used in [DP24].
///
/// The additive NTT is parameterized by a binary field $K$ and an $\mathbb{F}\_2$-linear
/// subspace. We write $\beta_0, \ldots, \beta_{\ell-1}$ for the ordered basis elements of the
/// subspace and require $\beta_0 = 1$. The basis determines a novel polynomial basis and an
/// evaluation domain. In the forward direction, the additive NTT transforms a vector of
/// polynomial coefficients, with respect to the novel polynomial basis, into a vector of their
/// evaluations over the evaluation domain. The inverse transformation interpolates polynomial
/// values over the domain into novel polynomial basis coefficients.
///
/// [LCH14]: <https://arxiv.org/abs/1404.3458>
/// [DP24]: <https://eprint.iacr.org/2024/504>
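///
/// An illustrative round trip (`ntt` stands for any implementation of this trait and `data` for
/// its input slice; neither is defined here):
///
/// ```ignore
/// // Treat `data` as the novel-basis coefficients of one polynomial with 2^4 terms.
/// let shape = NTTShape { log_x: 0, log_y: 4, log_z: 0 };
/// ntt.forward_transform(&mut data, shape, 0, 0)?; // coefficients -> evaluations on coset 0
/// ntt.inverse_transform(&mut data, shape, 0, 0)?; // evaluations -> coefficients
/// // `data` is back to the original coefficients: the transforms are mutual inverses.
/// ```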
pub trait AdditiveNTT<F: BinaryField> {
	/// Base-2 logarithm of the maximum size of the NTT domain, $\ell$.
	fn log_domain_size(&self) -> usize;

	/// Returns the binary subspace $S^{(i)}$.
	///
	/// The domain will have dimension $\ell - i$.
	///
	/// ## Preconditions
	///
	/// * `i` must be less than `self.log_domain_size()`
	fn subspace(&self, i: usize) -> BinarySubspace<F>;

	/// Get the normalized subspace polynomial evaluation $\hat{W}_i(\beta_j)$.
	///
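	/// Here, following the cited papers, $W_i(X) = \prod_{u \in U_i} (X - u)$ is the polynomial
	/// vanishing on $U_i$, the $\mathbb{F}\_2$-span of $\beta_0, \ldots, \beta_{i-1}$, and
	/// $\hat{W}_i(X) = W_i(X) / W_i(\beta_i)$ is its normalization, chosen so that
	/// $\hat{W}_i(\beta_i) = 1$.
	///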
	/// ## Preconditions
	///
	/// * `i` must be less than `self.log_domain_size()`
	/// * `j` must be less than `self.log_domain_size() - i`
	fn get_subspace_eval(&self, i: usize, j: usize) -> F;

	/// Batched forward transformation defined in [DP24], Section 2.3.
	///
	/// The scalars of `data`, viewed in natural order, represent a tensor of `shape` dimensions.
	/// See [`NTTShape`] for layout details. The transform is in-place; the output adheres to
	/// `shape` as well.
	///
	/// The `coset` specifies the evaluation domain as an indexed coset of the subspace.
	///
	/// The transformation accepts a `skip_rounds` parameter, which is a number of NTT layers to
	/// skip at the beginning of the forward transform. This corresponds to parallel NTTs on
	/// multiple adjacent subspace cosets, but beginning with messages interpreted as coefficients
	/// in a modified novel polynomial basis. See [DP24] Remark 4.15.
	///
	/// [DP24]: <https://eprint.iacr.org/2024/504>
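	///
	/// An illustrative call (`ntt` and `data` are placeholders for an implementation and its
	/// input, not items defined here):
	///
	/// ```ignore
	/// // Four interleaved columns (log_x = 2) of height 8 (log_y = 3), one Z slice,
	/// // evaluated over coset 0 with no skipped layers.
	/// let shape = NTTShape { log_x: 2, log_y: 3, log_z: 0 };
	/// ntt.forward_transform(&mut data, shape, 0, 0)?;
	/// ```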
	fn forward_transform<P: PackedField<Scalar = F>>(
		&self,
		data: &mut [P],
		shape: NTTShape,
		coset: u32,
		skip_rounds: usize,
	) -> Result<(), Error>;

	/// Batched inverse transformation defined in [DP24], Section 2.3.
	///
	/// The scalars of `data`, viewed in natural order, represent a tensor of `shape` dimensions.
	/// See [`NTTShape`] for layout details. The transform is in-place; the output adheres to
	/// `shape` as well.
	///
	/// The `coset` specifies the evaluation domain as an indexed coset of the subspace.
	///
	/// The transformation accepts a `skip_rounds` parameter, which is a number of NTT layers to
	/// skip at the end of the inverse transform. This corresponds to parallel NTTs on multiple
	/// adjacent subspace cosets, but returning with messages interpreted as coefficients in a
	/// modified novel polynomial basis. See [DP24] Remark 4.15.
	///
	/// [DP24]: <https://eprint.iacr.org/2024/504>
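	///
	/// An illustrative call (`ntt` and `evals` are placeholders, not items defined here). Per
	/// the remark above, `skip_rounds = 1` skips the final interpolation layer, acting as two
	/// parallel inverse NTTs of size 2^3 on adjacent cosets:
	///
	/// ```ignore
	/// let shape = NTTShape { log_x: 0, log_y: 4, log_z: 0 };
	/// ntt.inverse_transform(&mut evals, shape, 0, 1)?;
	/// ```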
	fn inverse_transform<P: PackedField<Scalar = F>>(
		&self,
		data: &mut [P],
		shape: NTTShape,
		coset: u32,
		skip_rounds: usize,
	) -> Result<(), Error>;

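	/// Forward transformation on a packed extension field, delegating to
	/// [`Self::forward_transform`] over the base field.
	///
	/// Each extension field scalar is reinterpreted as `2^LOG_DEGREE` base field scalars, so the
	/// base field view of the tensor has its X axis (the interleaved batch) widened by the
	/// extension degree.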
	fn forward_transform_ext<PE: PackedExtension<F>>(
		&self,
		data: &mut [PE],
		shape: NTTShape,
		coset: u32,
		skip_rounds: usize,
	) -> Result<(), Error> {
		let shape_ext = NTTShape {
			log_x: shape.log_x + PE::Scalar::LOG_DEGREE,
			..shape
		};
		self.forward_transform(PE::cast_bases_mut(data), shape_ext, coset, skip_rounds)
	}

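	/// Inverse transformation on a packed extension field, delegating to
	/// [`Self::inverse_transform`] over the base field.
	///
	/// Each extension field scalar is reinterpreted as `2^LOG_DEGREE` base field scalars, so the
	/// base field view of the tensor has its X axis (the interleaved batch) widened by the
	/// extension degree.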
	fn inverse_transform_ext<PE: PackedExtension<F>>(
		&self,
		data: &mut [PE],
		shape: NTTShape,
		coset: u32,
		skip_rounds: usize,
	) -> Result<(), Error> {
		let shape_ext = NTTShape {
			log_x: shape.log_x + PE::Scalar::LOG_DEGREE,
			..shape
		};
		self.inverse_transform(PE::cast_bases_mut(data), shape_ext, coset, skip_rounds)
	}
}