use std::{cmp, marker::PhantomData, mem::MaybeUninit, slice};

use binius_field::{
    arch::OptimalUnderlier256b,
    as_packed_field::{PackScalar, PackedType},
    underlier::Divisible,
    AESTowerField8b, BinaryField, BinaryField8b, ExtensionField, PackedAESBinaryField32x8b,
    PackedAESBinaryField64x8b, PackedExtension, PackedExtensionIndexable, PackedField,
    PackedFieldIndexable, TowerField,
};

use super::{super::hasher::Hasher, arch::Groestl256Core};
use crate::{CompressionFunction, PseudoCompressionFunction};

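/// The digest type: a single 256-bit packed field element over `F`, which is expected to
/// be isomorphic to [`AESTowerField8b`].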
pub type GroestlDigest<F> = PackedType<OptimalUnderlier256b, F>;

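/// An alias for [`Groestl256`] instantiated over [`BinaryField8b`].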
pub type GroestlHasher<P> = Groestl256<P, BinaryField8b>;

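/// The Grøstl-256 message block length, in bytes.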
const BLOCK_LEN_U8: usize = 64;

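/// An implementation of the Grøstl-256 hash function over packed binary tower fields.
///
/// The hasher works internally over [`AESTowerField8b`], whose multiplication agrees with
/// the AES field used by the Grøstl specification; [`BinaryField8b`] inputs are mapped
/// across the isomorphism between the two representations before being absorbed.
///
/// # Example
///
/// A minimal usage sketch (marked `ignore`; it assumes `PackedBinaryField32x8b` inputs and
/// the [`Hasher`] trait in scope, as in the tests below):
///
/// ```ignore
/// use binius_field::PackedBinaryField32x8b;
///
/// let data = [PackedBinaryField32x8b::default(); 4];
/// let digest = GroestlHasher::<PackedBinaryField32x8b>::new()
///     .chain_update(data)
///     .finalize();
/// ```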
#[derive(Debug, Clone)]
pub struct Groestl256<P, F> {
    state: PackedAESBinaryField64x8b,
    current_block: PackedAESBinaryField64x8b,
    current_len: u64,
    _p_marker: PhantomData<P>,
    _f_marker: PhantomData<F>,
}

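/// A helper trait that dispatches `update` on the scalar type: [`AESTowerField8b`]
/// messages are absorbed directly, while [`BinaryField8b`] messages are first mapped to
/// the AES basis element by element.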
trait UpdateOverSlice {
    type Elem;

    fn update_slice(&mut self, msg: &[Self::Elem], cur_block: usize);
}

impl<P> UpdateOverSlice for Groestl256<P, BinaryField8b> {
    type Elem = BinaryField8b;

    fn update_slice(&mut self, msg: &[BinaryField8b], cur_block: usize) {
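        // Map each byte across the BinaryField8b -> AESTowerField8b isomorphism, then
        // absorb it; compress the state whenever the 64-byte block fills up.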
        msg.iter()
            .map(|x| AESTowerField8b::from(*x))
            .enumerate()
            .for_each(|(i, x)| {
                let block_idx = (cur_block + i) % BLOCK_LEN_U8;
                self.current_block.set(block_idx, x);
                if block_idx == BLOCK_LEN_U8 - 1 {
                    self.state = compression_func(self.state, self.current_block);
                }
            });
    }
}

impl<P> UpdateOverSlice for Groestl256<P, AESTowerField8b> {
    type Elem = AESTowerField8b;

    fn update_slice(&mut self, msg: &[Self::Elem], cur_block: usize) {
        self.update_native(msg, cur_block);
    }
}

impl<P, F> Groestl256<P, F> {
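    /// Absorbs AES-basis bytes into the hasher state, compressing each time a full
    /// 64-byte block is accumulated. `cur_block` is the byte offset into the current
    /// partial block.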
    fn update_native(&mut self, mut msg: &[AESTowerField8b], mut cur_block: usize) {
        while !msg.is_empty() {
            let to_process = cmp::min(BLOCK_LEN_U8 - cur_block, msg.len());

            let next_block = PackedAESBinaryField64x8b::unpack_scalars_mut(slice::from_mut(
                &mut self.current_block,
            ));
            next_block[cur_block..cur_block + to_process].copy_from_slice(&msg[..to_process]);

            if cur_block + to_process == BLOCK_LEN_U8 {
                self.state = compression_func(self.state, self.current_block);
                cur_block = 0;
            }

            msg = &msg[to_process..];
        }
    }
}

impl<P, F> Default for Groestl256<P, F> {
    fn default() -> Self {
        let mut iv = PackedAESBinaryField64x8b::default();
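        // The Grøstl-256 IV is the 64-byte big-endian encoding of the digest bit length
        // 256 = 0x0100: byte 62 set to 0x01, all other bytes zero.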
        iv.set(62, AESTowerField8b::new(0x01));
        Self {
            state: iv,
            current_block: PackedAESBinaryField64x8b::default(),
            current_len: 0,
            _p_marker: PhantomData,
            _f_marker: PhantomData,
        }
    }
}

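/// The Grøstl compression function f(h, m) = P(h + m) + Q(m) + h, where P and Q are the
/// two Grøstl-256 permutations and `+` is addition in the binary field (XOR).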
fn compression_func(
    h: PackedAESBinaryField64x8b,
    m: PackedAESBinaryField64x8b,
) -> PackedAESBinaryField64x8b {
    let (a, b) = Groestl256Core.permutation_pq(h + m, m);
    a + b + h
}

impl<P, F> Groestl256<P, F>
where
    F: BinaryField,
    P: PackedExtension<F, PackedSubfield: PackedFieldIndexable>,
    P::Scalar: ExtensionField<F>,
{
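    /// Pads and compresses any remaining input, then applies the Grøstl output
    /// transformation, returning the last 32 bytes of `P(h) + h` as the digest.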
    fn finalize_packed(&mut self) -> PackedAESBinaryField32x8b {
        let bits_per_elem = P::WIDTH * P::Scalar::DEGREE * (1 << BinaryField8b::TOWER_LEVEL);
        let n = self
            .current_len
            .checked_mul(bits_per_elem as u64)
            .expect("Overflow on message length");
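        // Grøstl padding for an n-bit message: a 0x80 byte (the 1 bit plus seven of the
        // zero bits), more zero bytes, then the 64-bit big-endian block count, with the
        // number of zero bits w chosen so that n + 1 + w + 64 = 0 (mod 512).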
        let mut padding = [AESTowerField8b::default(); 128];
        padding[0] = AESTowerField8b::new(0x80);
        let w = (-(n as i64) - 65).rem_euclid(BLOCK_LEN_U8 as i64 * 8);
        let w = w as u64;
        let zero_pads = ((w - 7) / 8) as usize;
        let num_blocks = (n + w + 65) / (BLOCK_LEN_U8 as u64 * 8);
        padding[zero_pads + 1..zero_pads + 9]
            .copy_from_slice(&num_blocks.to_be_bytes().map(AESTowerField8b::new));

        let cur_block = (self.current_len as usize * P::WIDTH * P::Scalar::DEGREE) % BLOCK_LEN_U8;
        self.update_native(&padding[..zero_pads + 9], cur_block);

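        // Output transformation: truncate P(h) + h to its last 32 bytes.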
        let out_full = Groestl256Core.permutation_p(self.state) + self.state;
        let mut out = [PackedAESBinaryField32x8b::default()];
        let out_as_slice = PackedFieldIndexable::unpack_scalars_mut(&mut out);
        out_as_slice.copy_from_slice(&PackedFieldIndexable::unpack_scalars(&[out_full])[32..]);

        out[0]
    }
}

impl<P, F> Hasher<P> for Groestl256<P, F>
where
    F: BinaryField + From<AESTowerField8b> + Into<AESTowerField8b>,
    P: PackedExtension<F, PackedSubfield: PackedFieldIndexable>,
    OptimalUnderlier256b: PackScalar<F> + Divisible<F::Underlier>,
    Self: UpdateOverSlice<Elem = F>,
{
    type Digest = GroestlDigest<F>;

    fn new() -> Self {
        Self::default()
    }

    fn update(&mut self, data: impl AsRef<[P]>) {
        let msg = data.as_ref();
        if msg.is_empty() {
            return;
        }

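        // `current_len` counts packed `P` elements; each unpacks to
        // `P::WIDTH * P::Scalar::DEGREE` 8-bit base-field scalars, so the write offset
        // into the current block is taken modulo the 64-byte block length.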
        let cur_block = (self.current_len as usize * P::WIDTH * P::Scalar::DEGREE) % BLOCK_LEN_U8;
        let msg_remaining = P::unpack_base_scalars(msg);

        self.update_slice(msg_remaining, cur_block);

        self.current_len = self
            .current_len
            .checked_add(msg.len() as u64)
            .expect("Overflow on message length");
    }

    fn chain_update(mut self, data: impl AsRef<[P]>) -> Self {
        self.update(data);
        self
    }

    fn finalize(mut self) -> Self::Digest {
        let out = self.finalize_packed();
        Self::Digest::from_fn(|i| F::from(out.get(i)))
    }

    fn finalize_into(self, out: &mut MaybeUninit<Self::Digest>) {
        let finalized = self.finalize();
        out.write(finalized);
    }

    fn finalize_reset(&mut self) -> Self::Digest {
        let out_native = self.finalize_packed();
        let out = Self::Digest::from_fn(|i| F::from(out_native.get(i)));
        self.reset();
        out
    }

    fn finalize_into_reset(&mut self, out: &mut MaybeUninit<Self::Digest>) {
        let finalized = self.finalize_packed();
        out.write(Self::Digest::from_fn(|i| F::from(finalized.get(i))));
        self.reset();
    }

    fn reset(&mut self) {
        *self = Self::new();
    }
}

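/// A two-to-one compression function on Grøstl digests built from the Grøstl output
/// transformation: the two 32-byte digests are concatenated into a 64-byte state `x`, and
/// the result is the last 32 bytes of `P(x) + x`.
///
/// This implements [`PseudoCompressionFunction`]; the [`CompressionFunction`] impl below
/// additionally marks it as collision resistant.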
#[derive(Debug, Default, Clone)]
pub struct GroestlDigestCompression<F: BinaryField + From<AESTowerField8b> + Into<AESTowerField8b>>
{
    _f_marker: PhantomData<F>,
}

impl<F> PseudoCompressionFunction<GroestlDigest<F>, 2> for GroestlDigestCompression<F>
where
    OptimalUnderlier256b: PackScalar<F> + Divisible<F::Underlier>,
    F: BinaryField + From<AESTowerField8b> + Into<AESTowerField8b>,
{
    fn compress(&self, input: [GroestlDigest<F>; 2]) -> GroestlDigest<F> {
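        // Concatenate the two digests into one 64-byte AES-basis state.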
        let input_as_slice_bin: [F; 64] = PackedFieldIndexable::unpack_scalars(&input)
            .try_into()
            .unwrap();
        let input_as_slice: [AESTowerField8b; 64] = input_as_slice_bin.map(Into::into);
        let mut state = PackedAESBinaryField64x8b::default();
        let state_as_slice = PackedFieldIndexable::unpack_scalars_mut(slice::from_mut(&mut state));
        state_as_slice.copy_from_slice(&input_as_slice);
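        // Apply the output transformation P(x) + x and keep its last 32 bytes.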
        let new_state = Groestl256Core.permutation_p(state) + state;

        let new_state_slice: [AESTowerField8b; 32] =
            PackedFieldIndexable::unpack_scalars(slice::from_ref(&new_state))[32..]
                .try_into()
                .unwrap();
        let new_state_slice_bin: [F; 32] = new_state_slice.map(F::from);
        let mut out_bin = GroestlDigest::<F>::default();
        let out_bin_slice = PackedFieldIndexable::unpack_scalars_mut(slice::from_mut(&mut out_bin));
        out_bin_slice.copy_from_slice(&new_state_slice_bin);
        out_bin
    }
}

impl<F> CompressionFunction<GroestlDigest<F>, 2> for GroestlDigestCompression<F>
where
    OptimalUnderlier256b: PackScalar<F> + Divisible<F::Underlier>,
    F: BinaryField + From<AESTowerField8b> + Into<AESTowerField8b>,
{
}

#[cfg(test)]
mod tests {
    use std::array;

    use binius_field::{
        linear_transformation::Transformation, make_aes_to_binary_packed_transformer,
        PackedBinaryField32x8b, PackedBinaryField64x8b,
    };
    use rand::thread_rng;

    use super::*;
    use crate::{HashDigest, HasherDigest};

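    // Compressing two zero digests must match the last 32 bytes of P(0), mapped through
    // the AES-to-binary field isomorphism.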
    #[test]
    fn test_groestl_digest_compression() {
        let zero_perm = Groestl256Core.permutation_p(PackedAESBinaryField64x8b::default());
        let aes_to_bin_transform = make_aes_to_binary_packed_transformer::<
            PackedAESBinaryField64x8b,
            PackedBinaryField64x8b,
        >();
        let zero_perm_bin: PackedBinaryField64x8b = aes_to_bin_transform.transform(&zero_perm);
        let digest = GroestlDigestCompression::<BinaryField8b>::default().compress([
            GroestlDigest::<BinaryField8b>::default(),
            GroestlDigest::<BinaryField8b>::default(),
        ]);
        for (a, b) in digest.iter().zip(zero_perm_bin.iter().skip(32)) {
            assert_eq!(a, b);
        }
    }

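    // Hashing the same data in the AES basis and in the binary tower basis must agree
    // under the field isomorphism.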
    #[test]
    fn test_aes_binary_conversion() {
        let mut rng = thread_rng();
        let input_aes: [PackedAESBinaryField32x8b; 90] =
            array::from_fn(|_| PackedAESBinaryField32x8b::random(&mut rng));
        let input_bin: [PackedBinaryField32x8b; 90] = array::from_fn(|i| {
            let vec_bin = input_aes[i]
                .iter()
                .map(BinaryField8b::from)
                .collect::<Vec<_>>();
            PackedBinaryField32x8b::from_fn(|j| vec_bin[j])
        });

        let digest_aes = HasherDigest::<_, Groestl256<_, AESTowerField8b>>::hash(input_aes);
        let digest_bin = HasherDigest::<_, Groestl256<_, BinaryField8b>>::hash(input_bin);

        let digest_aes_bin = digest_aes
            .iter()
            .map(BinaryField8b::from)
            .collect::<Vec<_>>();
        assert_eq!(PackedBinaryField32x8b::from_fn(|j| digest_aes_bin[j]), digest_bin);
    }
}