// Copyright 2024 Ulvetanna Inc.

//! This module implements the 256-bit variant of [Grøstl](https://www.groestl.info/Groestl.pdf).

use super::{super::hasher::Hasher, arch::Groestl256Core};
use binius_field::{
	arch::OptimalUnderlier256b,
	as_packed_field::{PackScalar, PackedType},
	underlier::Divisible,
	AESTowerField8b, BinaryField, BinaryField8b, ExtensionField, PackedAESBinaryField32x8b,
	PackedAESBinaryField64x8b, PackedExtension, PackedExtensionIndexable, PackedField,
	PackedFieldIndexable, TowerField,
};
use p3_symmetric::{CompressionFunction, PseudoCompressionFunction};
use std::{cmp, marker::PhantomData, slice};

/// The output digest type for `Grøstl256` over a field `F`, which must be isomorphic to
/// `AESTowerField8b`.
pub type GroestlDigest<F> = PackedType<OptimalUnderlier256b, F>;

/// An alias for `Grøstl256` defined over `BinaryField8b`
pub type GroestlHasher<P> = Groestl256<P, BinaryField8b>;

const BLOCK_LEN_U8: usize = 64;

/// The Grøstl256 hash function, natively defined over `AESTowerField8b` and mapped
/// isomorphically to `BinaryField8b`. The type `P` is the element type accepted by the
/// update function and must be a packed extension field of `BinaryField8b` or
/// `AESTowerField8b`.
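///
/// # Example
///
/// A minimal usage sketch, assuming the crate's `Hasher` trait is in scope (marked
/// `ignore` since the exact re-export path may differ):
///
/// ```ignore
/// use binius_field::BinaryField8b;
///
/// let digest = GroestlHasher::<BinaryField8b>::new()
///     .chain_update([BinaryField8b::new(0xde), BinaryField8b::new(0xad)])
///     .finalize();
/// ```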
#[derive(Debug, Clone)]
pub struct Groestl256<P, F> {
	state: PackedAESBinaryField64x8b,
	current_block: PackedAESBinaryField64x8b,
	current_len: u64,
	_p_marker: PhantomData<P>,
	_f_marker: PhantomData<F>,
}

trait UpdateOverSlice {
	type Elem;

	fn update_slice(&mut self, msg: &[Self::Elem], cur_block: usize);
}

impl<P> UpdateOverSlice for Groestl256<P, BinaryField8b> {
	type Elem = BinaryField8b;

	fn update_slice(&mut self, msg: &[BinaryField8b], cur_block: usize) {
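		// Map each scalar through the `BinaryField8b` -> `AESTowerField8b` isomorphism,
		// buffer it, and compress whenever a full 64-byte block has accumulated.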
		msg.iter()
			.map(|x| AESTowerField8b::from(*x))
			.enumerate()
			.for_each(|(i, x)| {
				let block_idx = (cur_block + i) % BLOCK_LEN_U8;
				self.current_block.set(block_idx, x);
				if block_idx == BLOCK_LEN_U8 - 1 {
					self.state = compression_func(self.state, self.current_block);
				}
			});
	}
}

impl<P> UpdateOverSlice for Groestl256<P, AESTowerField8b> {
	type Elem = AESTowerField8b;

	fn update_slice(&mut self, msg: &[Self::Elem], cur_block: usize) {
		self.update_native(msg, cur_block);
	}
}

impl<P, F> Groestl256<P, F> {
	fn update_native(&mut self, mut msg: &[AESTowerField8b], mut cur_block: usize) {
		while !msg.is_empty() {
			let to_process = cmp::min(BLOCK_LEN_U8 - cur_block, msg.len());

			// First, copy the message bytes into the current block buffer
			let next_block = PackedAESBinaryField64x8b::unpack_scalars_mut(slice::from_mut(
				&mut self.current_block,
			));
			next_block[cur_block..cur_block + to_process].copy_from_slice(&msg[..to_process]);

			// Compress (absorb) once the block is full
			if cur_block + to_process == BLOCK_LEN_U8 {
				self.state = compression_func(self.state, self.current_block);
				cur_block = 0;
			}

			msg = &msg[to_process..];
		}
	}
}

impl<P, F> Default for Groestl256<P, F> {
	fn default() -> Self {
		let mut iv = PackedAESBinaryField64x8b::default();
		// The Grøstl-256 IV is the 512-bit big-endian encoding of the output length in
		// bits (256 = 0x0100), i.e. byte 62 = 0x01 and byte 63 = 0x00.
		iv.set(62, AESTowerField8b::new(0x01));
		Self {
			state: iv,
			current_block: PackedAESBinaryField64x8b::default(),
			current_len: 0,
			_p_marker: PhantomData,
			_f_marker: PhantomData,
		}
	}
}

/// The Grøstl256 compression function, f(h, m) = P(h + m) + Q(m) + h, where P and Q are
/// the two Grøstl permutations and `+` is field addition (XOR).
fn compression_func(
	h: PackedAESBinaryField64x8b,
	m: PackedAESBinaryField64x8b,
) -> PackedAESBinaryField64x8b {
	let (a, b) = Groestl256Core.permutation_pq(h + m, m);
	a + b + h
}

impl<P, F> Groestl256<P, F>
where
	F: BinaryField,
	P: PackedExtension<F, PackedSubfield: PackedFieldIndexable>,
	P::Scalar: ExtensionField<F>,
{
	fn finalize_packed(&mut self) -> PackedAESBinaryField32x8b {
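		// Message length in bits: each `P` element packs `WIDTH` scalars, each of
		// extension degree `DEGREE` over the 8-bit field (2^TOWER_LEVEL = 8 bits).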
		let bits_per_elem = P::WIDTH * P::Scalar::DEGREE * (1 << BinaryField8b::TOWER_LEVEL);
		let n = self
			.current_len
			.checked_mul(bits_per_elem as u64)
			.expect("Overflow on message length");
		// Padding buffer large enough for two 64-byte blocks
		let mut padding = [AESTowerField8b::default(); 128];
		padding[0] = AESTowerField8b::new(0x80);
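		// Grøstl padding: a single 1 bit (the high bit of the 0x80 byte above), then w
		// zero bits, then the 64-bit big-endian block count, so that the padded bit
		// length n + 1 + w + 64 is a multiple of 512. Since n is a multiple of 8, we
		// have w = 7 (mod 8) and the byte-count division below is exact.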
		let w = (-(n as i64) - 65).rem_euclid(BLOCK_LEN_U8 as i64 * 8);
		let w = w as u64;
		let zero_pads = ((w - 7) / 8) as usize;
		let num_blocks = (n + w + 65) / (BLOCK_LEN_U8 as u64 * 8);
		padding[zero_pads + 1..zero_pads + 9]
			.copy_from_slice(&num_blocks.to_be_bytes().map(AESTowerField8b::new));

		let cur_block = (self.current_len as usize * P::WIDTH * P::Scalar::DEGREE) % BLOCK_LEN_U8;
		self.update_native(&padding[..zero_pads + 9], cur_block);

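		// Output transformation: Ω(x) = trunc_256(P(x) + x), keeping the last 32 bytes.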
		let out_full = Groestl256Core.permutation_p(self.state) + self.state;
		let mut out = [PackedAESBinaryField32x8b::default()];
		let out_as_slice = PackedFieldIndexable::unpack_scalars_mut(&mut out);
		out_as_slice.copy_from_slice(&PackedFieldIndexable::unpack_scalars(&[out_full])[32..]);

		out[0]
	}
}

impl<P, F> Hasher<P> for Groestl256<P, F>
where
	F: BinaryField + From<AESTowerField8b> + Into<AESTowerField8b>,
	P: PackedExtension<F, PackedSubfield: PackedFieldIndexable>,
	P::Scalar: ExtensionField<F>,
	OptimalUnderlier256b: PackScalar<F> + Divisible<F::Underlier>,
	Self: UpdateOverSlice<Elem = F>,
{
	type Digest = GroestlDigest<F>;

	fn new() -> Self {
		Self::default()
	}

	fn update(&mut self, data: impl AsRef<[P]>) {
		let msg = data.as_ref();
		if msg.is_empty() {
			return;
		}

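		// Bytes already buffered in the current partial block: each `P` element
		// contributes `WIDTH * DEGREE` bytes.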
		let cur_block = (self.current_len as usize * P::WIDTH * P::Scalar::DEGREE) % BLOCK_LEN_U8;
		let msg_remaining = P::unpack_base_scalars(msg);

		self.update_slice(msg_remaining, cur_block);

		self.current_len = self
			.current_len
			.checked_add(msg.len() as u64)
			.expect("Overflow on message length");
	}

	fn chain_update(mut self, data: impl AsRef<[P]>) -> Self {
		self.update(data);
		self
	}

	fn finalize(mut self) -> Self::Digest {
		let out = self.finalize_packed();
		Self::Digest::from_fn(|i| F::from(out.get(i)))
	}

	fn finalize_into(self, out: &mut Self::Digest) {
		let finalized = self.finalize();
		*out = finalized;
	}

	fn finalize_reset(&mut self) -> Self::Digest {
		let out_native = self.finalize_packed();
		let out = Self::Digest::from_fn(|i| F::from(out_native.get(i)));
		self.reset();
		out
	}

	fn finalize_into_reset(&mut self, out: &mut Self::Digest) {
		let finalized = self.finalize_packed();
		*out = Self::Digest::from_fn(|i| F::from(finalized.get(i)));
		self.reset();
	}

	fn reset(&mut self) {
		*self = Self::new();
	}
}

/// A compression function for Grøstl hash digests based on the Grøstl output transformation.
///
/// This is a 512-bit to 256-bit compression function. This does _not_ apply the full Grøstl hash
/// algorithm to a 512-bit input. Instead, this compression function applies just the Grøstl output
/// transformation, which is believed to be one-way and collision-resistant.
///
/// ## Security justification
///
/// The Grøstl output transformation in [Grøstl] Section 3.3 is argued to be one-way and
/// collision-resistant in multiple ways. First, in Section 4.6, the authors argue that the output
/// transformation is an instance of the Matyas-Meyer-Oseas construction followed by a truncation.
/// Second, in Section 5.1, the authors show that the output transformation is a call to the
/// 1024-to-512-bit compression function on a 0-padded input followed by an XOR with a constant and
/// a truncation.
///
/// [Grøstl]: <https://www.groestl.info/Groestl.pdf>
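///
/// # Example
///
/// A minimal sketch of compressing two child digests into one, as when hashing up a
/// Merkle tree (marked `ignore` since the exact re-export paths may differ):
///
/// ```ignore
/// use binius_field::BinaryField8b;
/// use p3_symmetric::PseudoCompressionFunction;
///
/// let left = GroestlDigest::<BinaryField8b>::default();
/// let right = GroestlDigest::<BinaryField8b>::default();
/// let parent = GroestlDigestCompression::<BinaryField8b>::default().compress([left, right]);
/// ```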
#[derive(Debug, Default, Clone)]
pub struct GroestlDigestCompression<F: BinaryField + From<AESTowerField8b> + Into<AESTowerField8b>>
{
	_f_marker: PhantomData<F>,
}

impl<F> PseudoCompressionFunction<GroestlDigest<F>, 2> for GroestlDigestCompression<F>
where
	OptimalUnderlier256b: PackScalar<F> + Divisible<F::Underlier>,
	F: BinaryField + From<AESTowerField8b> + Into<AESTowerField8b>,
{
	fn compress(&self, input: [GroestlDigest<F>; 2]) -> GroestlDigest<F> {
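		// Concatenate the two 256-bit digests into a single 512-bit state, mapping each
		// scalar from the binary tower basis to the AES basis.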
		let input_as_slice_bin: [F; 64] = PackedFieldIndexable::unpack_scalars(&input)
			.try_into()
			.unwrap();
		let input_as_slice: [AESTowerField8b; 64] = input_as_slice_bin.map(Into::into);
		let mut state = PackedAESBinaryField64x8b::default();
		let state_as_slice = PackedFieldIndexable::unpack_scalars_mut(slice::from_mut(&mut state));
		state_as_slice.copy_from_slice(&input_as_slice);
		let new_state = Groestl256Core.permutation_p(state) + state;

		let new_state_slice: [AESTowerField8b; 32] =
			PackedFieldIndexable::unpack_scalars(slice::from_ref(&new_state))[32..]
				.try_into()
				.unwrap();
		let new_state_slice_bin: [F; 32] = new_state_slice.map(F::from);
		let mut out_bin = GroestlDigest::<F>::default();
		let out_bin_slice = PackedFieldIndexable::unpack_scalars_mut(slice::from_mut(&mut out_bin));
		out_bin_slice.copy_from_slice(&new_state_slice_bin);
		out_bin
	}
}

impl<F> CompressionFunction<GroestlDigest<F>, 2> for GroestlDigestCompression<F>
where
	OptimalUnderlier256b: PackScalar<F> + Divisible<F::Underlier>,
	F: BinaryField + From<AESTowerField8b> + Into<AESTowerField8b>,
{
}

#[cfg(test)]
mod tests {
	use super::*;
	use crate::{HashDigest, HasherDigest};
	use binius_field::{
		linear_transformation::Transformation, make_aes_to_binary_packed_transformer,
		PackedBinaryField32x8b, PackedBinaryField64x8b,
	};
	use rand::thread_rng;
	use std::array;

	#[test]
	fn test_groestl_digest_compression() {
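		// Compressing two all-zero digests must equal the last 32 bytes of P(0), i.e. the
		// output transformation Ω(0) = trunc_256(P(0) + 0), mapped to the binary basis.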
		let zero_perm = Groestl256Core.permutation_p(PackedAESBinaryField64x8b::default());
		let aes_to_bin_transform = make_aes_to_binary_packed_transformer::<
			PackedAESBinaryField64x8b,
			PackedBinaryField64x8b,
		>();
		let zero_perm_bin = aes_to_bin_transform.transform(&zero_perm);
		let digest = GroestlDigestCompression::<BinaryField8b>::default().compress([
			GroestlDigest::<BinaryField8b>::default(),
			GroestlDigest::<BinaryField8b>::default(),
		]);
		for (a, b) in digest.iter().zip(zero_perm_bin.iter().skip(32)) {
			assert_eq!(a, b);
		}
	}

	#[test]
	fn test_aes_binary_conversion() {
		let mut rng = thread_rng();
		let input_aes: [PackedAESBinaryField32x8b; 90] =
			array::from_fn(|_| PackedAESBinaryField32x8b::random(&mut rng));
		let input_bin: [PackedBinaryField32x8b; 90] = array::from_fn(|i| {
			let vec_bin = input_aes[i]
				.iter()
				.map(BinaryField8b::from)
				.collect::<Vec<_>>();
			PackedBinaryField32x8b::from_fn(|j| vec_bin[j])
		});

		let digest_aes = HasherDigest::<_, Groestl256<_, AESTowerField8b>>::hash(input_aes);
		let digest_bin = HasherDigest::<_, Groestl256<_, BinaryField8b>>::hash(input_bin);

		let digest_aes_bin = digest_aes
			.iter()
			.map(BinaryField8b::from)
			.collect::<Vec<_>>();
		assert_eq!(PackedBinaryField32x8b::from_fn(|j| digest_aes_bin[j]), digest_bin);
	}
}