binius_ntt/additive_ntt.rs
// Copyright 2024-2025 Irreducible Inc.

use binius_field::{BinaryField, ExtensionField, PackedExtension, PackedField};
use binius_math::BinarySubspace;

use super::error::Error;

/// Shape of a batched NTT operation on a 3-dimensional tensor.
///
/// The tensor has three dimensions. Elements are indexed first along axis X (width), then axis Y
/// (height), then axis Z (depth). In other words, elements sharing Y and Z coordinates are
/// contiguous in memory, elements sharing X and Z coordinates are strided by the tensor width, and
/// elements sharing X and Y coordinates are strided by the width times height.
///
/// The tensor has power-of-two length in each dimension.
///
/// The NTT operation performs a parallel NTT along the _Y axis_, meaning the operation transforms
/// each column independently.
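///
/// For illustration, a sketch of the flat index arithmetic this layout implies; the `flat_index`
/// helper below is purely illustrative and not part of this crate:
///
/// ```
/// # struct NTTShape { log_x: usize, log_y: usize, log_z: usize }
/// fn flat_index(shape: &NTTShape, x: usize, y: usize, z: usize) -> usize {
///     // X is contiguous; Y strides by the width; Z strides by width * height.
///     x + (y << shape.log_x) + (z << (shape.log_x + shape.log_y))
/// }
///
/// let shape = NTTShape { log_x: 1, log_y: 2, log_z: 1 };
/// assert_eq!(flat_index(&shape, 1, 3, 0), 7); // last element of the first Z-slice
/// assert_eq!(flat_index(&shape, 0, 0, 1), 8); // first element of the second Z-slice
/// ```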
#[derive(Debug, Default, Clone, Copy)]
pub struct NTTShape {
    /// Log length along the X axis (width). This is the interleaved batch size.
    pub log_x: usize,
    /// Log length along the Y axis (height). This is the size of the NTT transform.
    pub log_y: usize,
    /// Log length along the Z axis (depth). This is the consecutive batch size.
    pub log_z: usize,
}

/// The binary field additive NTT.
///
/// A number-theoretic transform (NTT) is a linear transformation on a finite field analogous to
/// the discrete Fourier transform. The version of the additive NTT we use is originally described
/// in [LCH14]. In [DP24] Section 3.1, the authors present the LCH additive NTT algorithm in a way
/// that makes apparent its compatibility with the FRI proximity test. Throughout the
/// documentation, we will refer to the notation used in [DP24].
///
/// The additive NTT is parameterized by a binary field $K$ and an $\mathbb{F}\_2$-linear subspace.
/// We write $\beta_0, \ldots, \beta_{\ell-1}$ for the ordered basis elements of the subspace and
/// require $\beta_0 = 1$. The basis determines a novel polynomial basis and an evaluation domain.
/// In the forward direction, the additive NTT transforms a vector of polynomial coefficients, with
/// respect to the novel polynomial basis, into a vector of their evaluations over the evaluation
/// domain. The inverse transformation interpolates polynomial values over the domain into novel
/// polynomial basis coefficients.
///
/// An [`AdditiveNTT`] implementation with a maximum domain dimension of $\ell$ can be applied on
/// a sequence of $\ell + 1$ evaluation domains of sizes $2^0, \ldots, 2^\ell$. These are the
/// domains $S^{(\ell)}, S^{(\ell - 1)}, \ldots, S^{(0)}$ defined in [DP24] Section 4. The methods
/// [`Self::forward_transform`] and [`Self::inverse_transform`] determine a log domain size,
/// `shape.log_y + coset_bits`, that indicates which of the $S^{(i)}$ domains to use for the
/// transformation's evaluation domain and novel polynomial basis. (Remember, the novel polynomial
/// basis is itself parameterized by the subspace basis.) **Counterintuitively, the field subspace
/// $S^{(i+1)}$ is not necessarily a subset of $S^{(i)}$**. We choose this behavior for the
/// [`AdditiveNTT`] trait because it facilitates compatibility with FRI when batching proximity
/// tests for codewords of different dimensions.
///
/// [LCH14]: <https://arxiv.org/abs/1404.3458>
/// [DP24]: <https://eprint.iacr.org/2024/504>
pub trait AdditiveNTT<F: BinaryField> {
    /// Base-2 logarithm of the maximum size of the NTT domain, $\ell$.
    fn log_domain_size(&self) -> usize;

    /// Returns the binary subspace with dimension $i$.
    ///
    /// In [DP24], this subspace is referred to as $S^{(\ell - i)}$, where $\ell$ is the base-2
    /// logarithm of the maximum domain size of the NTT. We choose to reverse the indexing order
    /// with respect to the paper because it is more natural in code that the $i$th subspace has
    /// dimension $i$.
    ///
    /// ## Preconditions
    ///
    /// * `i` must be less than `self.log_domain_size()`
    ///
    /// [DP24]: <https://eprint.iacr.org/2024/504>
    fn subspace(&self, i: usize) -> BinarySubspace<F>;

    /// Get the $j$'th basis element of the $i$'th subspace.
    ///
    /// ## Preconditions
    ///
    /// * `i` must be less than `self.log_domain_size()`
    /// * `j` must be less than `i`
    fn get_subspace_eval(&self, i: usize, j: usize) -> F {
        self.subspace(i).basis()[j]
    }

    /// Batched forward transformation defined in [DP24], Section 2.3.
    ///
    /// The scalars of `data`, viewed in natural order, represent a tensor of `shape` dimensions.
    /// See [`NTTShape`] for layout details. The transform is in-place; the output adheres to
    /// `shape` as well.
    ///
    /// The `coset` specifies the evaluation domain as an indexed coset of the subspace. The coset
    /// index must be representable in `coset_bits` bits. The evaluation domain of the NTT is
    /// chosen to be the subspace $S^{(i)}$ with dimension `shape.log_y + coset_bits`. Choosing the
    /// NTT domain in this way ensures consistency of the domains when using FRI with different
    /// codeword lengths.
    ///
    /// The transformation accepts a `skip_rounds` parameter, which is a number of NTT layers to
    /// skip at the beginning of the forward transform. This corresponds to parallel NTTs on
    /// multiple adjacent subspace cosets, but beginning with messages interpreted as coefficients
    /// in a modified novel polynomial basis. See [DP24] Remark 4.15.
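    ///
    /// For illustration, a hedged sketch of a call; the `ntt` and `data` bindings are assumed
    /// here, not defined by this trait. It evaluates a message of $2^{10}$ coefficients on coset
    /// 3 of the domain of dimension $10 + 2 = 12$:
    ///
    /// ```ignore
    /// let shape = NTTShape { log_x: 0, log_y: 10, log_z: 0 };
    /// // The coset index 3 fits in coset_bits = 2 bits; no rounds are skipped.
    /// ntt.forward_transform(&mut data, shape, 3, 2, 0)?;
    /// ```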
    ///
    /// [DP24]: <https://eprint.iacr.org/2024/504>
    fn forward_transform<P: PackedField<Scalar = F>>(
        &self,
        data: &mut [P],
        shape: NTTShape,
        coset: usize,
        coset_bits: usize,
        skip_rounds: usize,
    ) -> Result<(), Error>;

    /// Batched inverse transformation defined in [DP24], Section 2.3.
    ///
    /// The scalars of `data`, viewed in natural order, represent a tensor of `shape` dimensions.
    /// See [`NTTShape`] for layout details. The transform is in-place; the output adheres to
    /// `shape` as well.
    ///
    /// The `coset` specifies the evaluation domain as an indexed coset of the subspace. The coset
    /// index must be representable in `coset_bits` bits. The evaluation domain of the NTT is
    /// chosen to be the subspace $S^{(i)}$ with dimension `shape.log_y + coset_bits`. Choosing the
    /// NTT domain in this way ensures consistency of the domains when using FRI with different
    /// codeword lengths.
    ///
    /// The transformation accepts a `skip_rounds` parameter, which is a number of NTT layers to
    /// skip at the end of the inverse transform. This corresponds to parallel NTTs on multiple
    /// adjacent subspace cosets, but returning with messages interpreted as coefficients in a
    /// modified novel polynomial basis. See [DP24] Remark 4.15.
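    ///
    /// For illustration, a hedged round-trip sketch; the bindings are assumed, not defined by
    /// this trait. The inverse transform undoes a forward transform invoked with identical
    /// arguments:
    ///
    /// ```ignore
    /// let original = data.to_vec();
    /// ntt.forward_transform(&mut data, shape, coset, coset_bits, 0)?;
    /// ntt.inverse_transform(&mut data, shape, coset, coset_bits, 0)?;
    /// assert_eq!(data, original);
    /// ```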
    ///
    /// [DP24]: <https://eprint.iacr.org/2024/504>
    fn inverse_transform<P: PackedField<Scalar = F>>(
        &self,
        data: &mut [P],
        shape: NTTShape,
        coset: usize,
        coset_bits: usize,
        skip_rounds: usize,
    ) -> Result<(), Error>;

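    /// Forward transformation of a tensor over an extension field of `F`.
    ///
    /// The elements of `data` are reinterpreted as base field scalars, with the extension degree
    /// folded into the interleaved batch size (the X axis), and transformed with
    /// [`Self::forward_transform`]. This is valid because the NTT is an `F`-linear
    /// transformation.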
    fn forward_transform_ext<PE: PackedExtension<F>>(
        &self,
        data: &mut [PE],
        shape: NTTShape,
        coset: usize,
        coset_bits: usize,
        skip_rounds: usize,
    ) -> Result<(), Error> {
        let shape_ext = NTTShape {
            log_x: shape.log_x + PE::Scalar::LOG_DEGREE,
            ..shape
        };
        self.forward_transform(PE::cast_bases_mut(data), shape_ext, coset, coset_bits, skip_rounds)
    }

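    /// Inverse transformation of a tensor over an extension field of `F`.
    ///
    /// See [`Self::forward_transform_ext`]; this is the analogous wrapper around
    /// [`Self::inverse_transform`].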
    fn inverse_transform_ext<PE: PackedExtension<F>>(
        &self,
        data: &mut [PE],
        shape: NTTShape,
        coset: usize,
        coset_bits: usize,
        skip_rounds: usize,
    ) -> Result<(), Error> {
        let shape_ext = NTTShape {
            log_x: shape.log_x + PE::Scalar::LOG_DEGREE,
            ..shape
        };
        self.inverse_transform(PE::cast_bases_mut(data), shape_ext, coset, coset_bits, skip_rounds)
    }
}