binius_m3/builder/structured.rs

// Copyright 2025 Irreducible Inc.

use binius_field::{ExtensionField, TowerField};
use binius_math::ArithExpr;

use crate::builder::B1;

#[derive(Debug, thiserror::Error)]
pub enum Error {
	#[error("max_log_size must be less than or equal to F::N_BITS")]
	MaxLogSizeTooLarge,

	#[error("table size must be at most 2^max_log_size")]
	TableSizeTooLarge,

	#[error("math error: {0}")]
	Math(#[from] binius_math::Error),
}

/// Specifications of structured columns that are generated from a dynamic table size.
///
/// A structured column is one that has sufficient structure that its multilinear extension
/// can be evaluated succinctly. These are referred to as "MLE-structured" tables in [Lasso].
///
/// [Lasso]: <https://eprint.iacr.org/2023/1216>
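///
/// For example, an `Incrementing` column with `max_size_log = 3`, filled to size 8, holds the
/// incrementing binary field elements 0, 1, ..., 7 (see `test_incrementing_expr` below).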
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum StructuredDynSize {
	/// A column whose values are incrementing binary field elements in lexicographic order.
	Incrementing {
		/// The base-2 logarithm of the maximum size of the column.
		max_size_log: usize,
	},
}

impl StructuredDynSize {
	/// Returns an arithmetic expression that represents the multilinear extension of the
	/// structured column.
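	///
	/// A minimal usage sketch (not compiled as a doctest); `B32` is the 32-bit tower field alias
	/// used in the tests below:
	///
	/// ```ignore
	/// let spec = StructuredDynSize::Incrementing { max_size_log: 8 };
	/// let expr = spec.expr::<B32>().expect("8 <= B32::N_BITS");
	/// ```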
	pub fn expr<F: TowerField>(self) -> Result<ArithExpr<F>, Error> {
		match self {
			StructuredDynSize::Incrementing { max_size_log } => {
				incrementing_expr::<F>(max_size_log)
			}
		}
	}

	/// Returns the base-2 logarithm of the maximum size of the column.
	fn max_size_log(&self) -> usize {
		match self {
			StructuredDynSize::Incrementing { max_size_log } => *max_size_log,
		}
	}

	/// Checks whether a table with the given log size, `n_vars`, fits within this structured
	/// column specification.
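	///
	/// For example, with `max_size_log = 32`, `check_nvars(5)` succeeds while `check_nvars(33)`
	/// returns an error.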
	pub fn check_nvars(&self, n_vars: usize) -> Result<(), Error> {
		if n_vars > self.max_size_log() {
			Err(Error::TableSizeTooLarge)
		} else {
			Ok(())
		}
	}
}

/// Returns the arithmetic expression for an incrementing column.
///
/// The multilinear expression is
///
/// $$
/// \sum_{i=0}^{n-1} X_i \beta_i,
/// $$
///
/// where $n$ is `max_log_size` and $\beta_i$ is the $i$-th basis element of the field $F$ as an
/// $\mathbb{F}_2$ vector space. On the boolean hypercube the variables $X_i$ are the bits of the
/// row index, so the evaluation at row $i$ is the field element whose $\mathbb{F}_2$ coordinates
/// are the bits of $i$, giving an incrementing sequence of field elements.
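///
/// A sketch of how the test below exercises this expression (types from the test module; not
/// compiled as a doctest):
///
/// ```ignore
/// let expr = incrementing_expr::<B32>(5).unwrap();
/// let evaluator = ArithCircuitPoly::new(expr.into());
/// // Evaluating at the hypercube point for index i yields B32::new(i).
/// ```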
pub fn incrementing_expr<F: TowerField>(max_log_size: usize) -> Result<ArithExpr<F>, Error> {
	if max_log_size > F::N_BITS {
		return Err(Error::MaxLogSizeTooLarge);
	}
	let expr = (0..max_log_size)
		.map(|i| ArithExpr::Var(i) * ArithExpr::Const(<F as ExtensionField<B1>>::basis(i)))
		.sum::<ArithExpr<F>>();
	Ok(expr)
}

#[cfg(test)]
mod tests {
	use std::iter;

	use binius_compute::cpu::alloc::CpuComputeAllocator;
	use binius_core::polynomial::test_utils::decompose_index_to_hypercube_point;
	use binius_fast_compute::arith_circuit::ArithCircuitPoly;
	use binius_field::{BinaryField32b, arch::OptimalUnderlier128b, as_packed_field::PackedType};
	use binius_math::{ArithCircuit, CompositionPoly};
	use itertools::izip;

	use super::*;
	use crate::{
		builder::{
			B16, B32, B128, ConstraintSystem, WitnessIndex,
			test_utils::{ClosureFiller, validate_system_witness},
		},
		gadgets::structured::fill_incrementing_b32,
	};

	#[test]
	fn test_incrementing_expr() {
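		// The MLE of the incrementing column, evaluated at the hypercube point for index i,
		// must equal the field element B32::new(i).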
		let expr = incrementing_expr::<B32>(5).unwrap();
		let evaluator = ArithCircuitPoly::new(expr.into());
		for i in 0..1 << 5 {
			let bits = decompose_index_to_hypercube_point::<B32>(5, i);
			assert_eq!(evaluator.evaluate(&bits).unwrap(), B32::new(i as u32));
		}
	}

	#[test]
	fn test_fill_incrementing() {
		let mut cs = ConstraintSystem::new();
		let mut table = cs.add_table("test");
		table.require_power_of_two_size();
		let test_table_id = table.id();
		let expected_col = table.add_committed::<B32, 1>("reference");
		let structured_col = table.add_structured::<B32>(
			"incrementing",
			StructuredDynSize::Incrementing { max_size_log: 32 },
		);
		table.assert_zero("reference = structured", expected_col - structured_col);

		let mut allocator = CpuComputeAllocator::new(1 << 12);
		let allocator = allocator.into_bump_allocator();
		let mut witness =
			WitnessIndex::<PackedType<OptimalUnderlier128b, B128>>::new(&cs, &allocator);
		{
			let table_witness = witness.init_table(test_table_id, 1 << 5).unwrap();
			table_witness
				.fill_sequential_with_segment_size(
					&ClosureFiller::new(test_table_id, |events, index| {
						{
							let mut expected_col = index.get_scalars_mut::<B32, 1>(expected_col)?;
							for (&i, col_i) in iter::zip(events, &mut *expected_col) {
								*col_i = BinaryField32b::new(i);
							}
						}

						fill_incrementing_b32(index, structured_col)?;
						Ok(())
					}),
					&(0..1 << 5).collect::<Vec<_>>(),
					// Test that fill works when the segment size is less than the full index size.
					4,
				)
				.unwrap();
		}

		validate_system_witness::<OptimalUnderlier128b>(&cs, witness, vec![]);
	}

	#[test]
	fn test_fill_bitwise_and() {
		let log_size = 8;

		let mut cs = ConstraintSystem::new();
		let mut table = cs.add_table("test");
		table.require_fixed_size(log_size);
		let test_table_id = table.id();
		let expected_col = table.add_committed::<B16, 1>("reference");

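		// The fixed column packs the 8-bit lookup index into bits 4..12 and the nibble-wise AND
		// of its two halves into bits 0..4, matching the reference values computed in the filler
		// below.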
		let lookup_index = (0..log_size)
			.map(|i| {
				ArithExpr::Var(i) * ArithExpr::Const(<B128 as ExtensionField<B1>>::basis(i + 4))
			})
			.sum::<ArithExpr<B128>>();

		let and_res = (0..4)
			.map(|i| {
				ArithExpr::Var(i)
					* ArithExpr::Var(4 + i)
					* ArithExpr::Const(<B128 as ExtensionField<B1>>::basis(i))
			})
			.sum::<ArithExpr<B128>>();

		let expr = lookup_index + and_res;

		let structured_col = table.add_fixed::<B16>("a|b|res", ArithCircuit::from(&expr));

		table.assert_zero("reference = structured", expected_col - structured_col);

		let mut allocator = CpuComputeAllocator::new(1 << 12);
		let allocator = allocator.into_bump_allocator();
		let mut witness =
			WitnessIndex::<PackedType<OptimalUnderlier128b, B128>>::new(&cs, &allocator);
		witness
			.fill_table_sequential(
				&ClosureFiller::new(test_table_id, |events, index| {
					{
						let mut expected_col = index.get_scalars_mut::<B16, 1>(expected_col)?;
						let mut structured_col = index.get_scalars_mut::<B16, 1>(structured_col)?;
						for (&i, col_i, s_col) in
							izip!(events, &mut *expected_col, &mut *structured_col)
						{
							let x = ((i >> 4) & 15) as u16;
							let y = (i & 15) as u16;
							let z = x & y;
							*col_i = B16::new(((i as u16) << 4) | z);
							*s_col = *col_i;
						}
					}

					Ok(())
				}),
				&(0..1 << log_size).collect::<Vec<_>>(),
			)
			.unwrap();

		validate_system_witness::<OptimalUnderlier128b>(&cs, witness, vec![]);
	}
}