#include <botan/threefish_512.h>
#include <immintrin.h>

namespace Botan {

namespace {
BOTAN_FUNC_ISA("avx2")
inline void interleave_epi64(__m256i& X0, __m256i& X1)
   {
   // Interleave the eight 64-bit words of one block:
   // (B0,B1,B2,B3),(B4,B5,B6,B7) -> (B0,B2,B4,B6),(B1,B3,B5,B7)
   const __m256i T0 = _mm256_unpacklo_epi64(X0, X1);
   const __m256i T1 = _mm256_unpackhi_epi64(X0, X1);

   X0 = _mm256_permute4x64_epi64(T0, _MM_SHUFFLE(3,1,2,0));
   X1 = _mm256_permute4x64_epi64(T1, _MM_SHUFFLE(3,1,2,0));
   }
BOTAN_FUNC_ISA("avx2")
inline void deinterleave_epi64(__m256i& X0, __m256i& X1)
   {
   // Invert interleave_epi64:
   // (B0,B2,B4,B6),(B1,B3,B5,B7) -> (B0,B1,B2,B3),(B4,B5,B6,B7)
   const __m256i T0 = _mm256_permute4x64_epi64(X0, _MM_SHUFFLE(3,1,2,0));
   const __m256i T1 = _mm256_permute4x64_epi64(X1, _MM_SHUFFLE(3,1,2,0));

   X0 = _mm256_unpacklo_epi64(T0, T1);
   X1 = _mm256_unpackhi_epi64(T0, T1);
   }
BOTAN_FUNC_ISA("avx2")
inline void rotate_keys(__m256i& R0, __m256i& R1, __m256i R2)
   {
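   /*
   The three cached registers hold consecutive key words in the even/odd
   (interleaved) lane layout. Each call advances them to the next pair of
   subkeys using 3 permutes and 2 blends; after the nine advances needed
   for one block the registers are back to their starting values, so no
   reload is required before the next block.
   */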
   __m256i T0 = _mm256_permute4x64_epi64(R0, _MM_SHUFFLE(0,0,0,0));
   __m256i T1 = _mm256_permute4x64_epi64(R1, _MM_SHUFFLE(0,3,2,1));
   __m256i T2 = _mm256_permute4x64_epi64(R2, _MM_SHUFFLE(0,3,2,1));

   R0 = _mm256_blend_epi32(T1, T0, 0xC0);
   R1 = _mm256_blend_epi32(T2, T1, 0xC0);
   }

}
BOTAN_FUNC_ISA("avx2")
void Threefish_512::avx2_encrypt_n(const uint8_t in[], uint8_t out[], size_t blocks) const
   {
   const uint64_t* K = m_K.data();
   const uint64_t* T_64 = m_T.data();

   const __m256i ROTATE_1 = _mm256_set_epi64x(37,19,36,46);
   const __m256i ROTATE_2 = _mm256_set_epi64x(42,14,27,33);
   const __m256i ROTATE_3 = _mm256_set_epi64x(39,36,49,17);
   const __m256i ROTATE_4 = _mm256_set_epi64x(56,54, 9,44);
   const __m256i ROTATE_5 = _mm256_set_epi64x(24,34,30,39);
   const __m256i ROTATE_6 = _mm256_set_epi64x(17,10,50,13);
   const __m256i ROTATE_7 = _mm256_set_epi64x(43,39,29,25);
   const __m256i ROTATE_8 = _mm256_set_epi64x(22,56,35, 8);
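// ROTATE_1..ROTATE_8 pack the per-word rotation amounts of the eight distinct
// Threefish-512 rounds (the rotation schedule repeats with period 8).
// THREEFISH_ROUND performs the four MIX operations of one round in parallel on
// the interleaved state: add, rotate left (emulated with variable shifts, since
// AVX2 has no 64-bit rotate), xor, then a lane permute that applies the
// Threefish word permutation.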
#define THREEFISH_ROUND(X0, X1, SHL)                                                \
   do {                                                                             \
      const __m256i SHR = _mm256_sub_epi64(_mm256_set1_epi64x(64), SHL);            \
      X0 = _mm256_add_epi64(X0, X1);                                                \
      X1 = _mm256_or_si256(_mm256_sllv_epi64(X1, SHL), _mm256_srlv_epi64(X1, SHR)); \
      X1 = _mm256_xor_si256(X1, X0);                                                \
      X0 = _mm256_permute4x64_epi64(X0, _MM_SHUFFLE(0, 3, 2, 1));                   \
      X1 = _mm256_permute4x64_epi64(X1, _MM_SHUFFLE(1, 2, 3, 0));                   \
   } while(0)
#define THREEFISH_ROUND_2(X0, X1, X2, X3, SHL)                                      \
   do {                                                                             \
      const __m256i SHR = _mm256_sub_epi64(_mm256_set1_epi64x(64), SHL);            \
      X0 = _mm256_add_epi64(X0, X1);                                                \
      X2 = _mm256_add_epi64(X2, X3);                                                \
      X1 = _mm256_or_si256(_mm256_sllv_epi64(X1, SHL), _mm256_srlv_epi64(X1, SHR)); \
      X3 = _mm256_or_si256(_mm256_sllv_epi64(X3, SHL), _mm256_srlv_epi64(X3, SHR)); \
      X1 = _mm256_xor_si256(X1, X0);                                                \
      X3 = _mm256_xor_si256(X3, X2);                                                \
      X0 = _mm256_permute4x64_epi64(X0, _MM_SHUFFLE(0, 3, 2, 1));                   \
      X2 = _mm256_permute4x64_epi64(X2, _MM_SHUFFLE(0, 3, 2, 1));                   \
      X1 = _mm256_permute4x64_epi64(X1, _MM_SHUFFLE(1, 2, 3, 0));                   \
      X3 = _mm256_permute4x64_epi64(X3, _MM_SHUFFLE(1, 2, 3, 0));                   \
   } while(0)
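// Subkey injection for the interleaved layout: K0/K1 hold the even/odd key
// words of the subkey, T0I/T1I select which tweak words are added to state
// words 6 and 5, and the subkey counter R is added to word 7 (the top lane
// of X1).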
#define THREEFISH_INJECT_KEY(X0, X1, R, K0, K1, T0I, T1I)                        \
   do {                                                                          \
      const __m256i T0 = _mm256_permute4x64_epi64(T, _MM_SHUFFLE(T0I, 0, 0, 0)); \
      const __m256i T1 = _mm256_permute4x64_epi64(T, _MM_SHUFFLE(0, T1I, 0, 0)); \
      X0 = _mm256_add_epi64(X0, K0);                                             \
      X1 = _mm256_add_epi64(X1, K1);                                             \
      X1 = _mm256_add_epi64(X1, _mm256_set_epi64x(R,0,0,0));                     \
      X0 = _mm256_add_epi64(X0, T0);                                             \
      X1 = _mm256_add_epi64(X1, T1);                                             \
   } while(0)
#define THREEFISH_INJECT_KEY_2(X0, X1, X2, X3, R, K0, K1, T0I, T1I)              \
   do {                                                                          \
      const __m256i T0 = _mm256_permute4x64_epi64(T, _MM_SHUFFLE(T0I, 0, 0, 0)); \
      __m256i T1 = _mm256_permute4x64_epi64(T, _MM_SHUFFLE(0, T1I, 0, 0));       \
      X0 = _mm256_add_epi64(X0, K0);                                             \
      X2 = _mm256_add_epi64(X2, K0);                                             \
      X1 = _mm256_add_epi64(X1, K1);                                             \
      X3 = _mm256_add_epi64(X3, K1);                                             \
      T1 = _mm256_add_epi64(T1, _mm256_set_epi64x(R,0,0,0));                     \
      X0 = _mm256_add_epi64(X0, T0);                                             \
      X2 = _mm256_add_epi64(X2, T0);                                             \
      X1 = _mm256_add_epi64(X1, T1);                                             \
      X3 = _mm256_add_epi64(X3, T1);                                             \
   } while(0)
#define THREEFISH_ENC_8_ROUNDS(X0, X1, R, K0, K1, K2, T0, T1, T2) \
   do {                                                           \
      rotate_keys(K1, K2, K0);                                    \
      THREEFISH_ROUND(X0, X1, ROTATE_1);                          \
      THREEFISH_ROUND(X0, X1, ROTATE_2);                          \
      THREEFISH_ROUND(X0, X1, ROTATE_3);                          \
      THREEFISH_ROUND(X0, X1, ROTATE_4);                          \
      THREEFISH_INJECT_KEY(X0, X1, R, K0, K1, T0, T1);            \
                                                                  \
      THREEFISH_ROUND(X0, X1, ROTATE_5);                          \
      THREEFISH_ROUND(X0, X1, ROTATE_6);                          \
      THREEFISH_ROUND(X0, X1, ROTATE_7);                          \
      THREEFISH_ROUND(X0, X1, ROTATE_8);                          \
      THREEFISH_INJECT_KEY(X0, X1, R+1, K1, K2, T2, T0);          \
   } while(0)
#define THREEFISH_ENC_2_8_ROUNDS(X0, X1, X2, X3, R, K0, K1, K2, T0, T1, T2) \
   do {                                                                     \
      rotate_keys(K1, K2, K0);                                              \
      THREEFISH_ROUND_2(X0, X1, X2, X3, ROTATE_1);                          \
      THREEFISH_ROUND_2(X0, X1, X2, X3, ROTATE_2);                          \
      THREEFISH_ROUND_2(X0, X1, X2, X3, ROTATE_3);                          \
      THREEFISH_ROUND_2(X0, X1, X2, X3, ROTATE_4);                          \
      THREEFISH_INJECT_KEY_2(X0, X1, X2, X3, R, K0, K1, T0, T1);            \
                                                                            \
      THREEFISH_ROUND_2(X0, X1, X2, X3, ROTATE_5);                          \
      THREEFISH_ROUND_2(X0, X1, X2, X3, ROTATE_6);                          \
      THREEFISH_ROUND_2(X0, X1, X2, X3, ROTATE_7);                          \
      THREEFISH_ROUND_2(X0, X1, X2, X3, ROTATE_8);                          \
      THREEFISH_INJECT_KEY_2(X0, X1, X2, X3, R+1, K1, K2, T2, T0);          \
   } while(0)
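   // Encryption keeps only three key registers resident (in the even/odd lane
   // layout); rotate_keys steps them through the 19 subkeys, and they return
   // to their initial values once a block is complete.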
   __m256i K0 = _mm256_set_epi64x(K[5], K[3], K[1], K[8]);
   __m256i K1 = _mm256_set_epi64x(K[6], K[4], K[2], K[0]);
   __m256i K2 = _mm256_set_epi64x(K[7], K[5], K[3], K[1]);
   const __m256i* in_mm = reinterpret_cast<const __m256i*>(in);
   __m256i* out_mm = reinterpret_cast<__m256i*>(out);

   while(blocks >= 2)
      {
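      // Two blocks per iteration: X0/X1 hold the first block and X2/X3 the
      // second, each split into even and odd words after interleaving.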
      __m256i X0 = _mm256_loadu_si256(in_mm++);
      __m256i X1 = _mm256_loadu_si256(in_mm++);
      __m256i X2 = _mm256_loadu_si256(in_mm++);
      __m256i X3 = _mm256_loadu_si256(in_mm++);

      const __m256i T = _mm256_set_epi64x(T_64[0], T_64[1], T_64[2], 0);

      interleave_epi64(X0, X1);
      interleave_epi64(X2, X3);

      // Initial key whitening (subkey 0)
      THREEFISH_INJECT_KEY_2(X0, X1, X2, X3, 0, K1, K2, 2, 3);

      THREEFISH_ENC_2_8_ROUNDS(X0, X1, X2, X3,  1, K2,K0,K1, 1, 2, 3);
      THREEFISH_ENC_2_8_ROUNDS(X0, X1, X2, X3,  3, K1,K2,K0, 2, 3, 1);
      THREEFISH_ENC_2_8_ROUNDS(X0, X1, X2, X3,  5, K0,K1,K2, 3, 1, 2);
      THREEFISH_ENC_2_8_ROUNDS(X0, X1, X2, X3,  7, K2,K0,K1, 1, 2, 3);
      THREEFISH_ENC_2_8_ROUNDS(X0, X1, X2, X3,  9, K1,K2,K0, 2, 3, 1);
      THREEFISH_ENC_2_8_ROUNDS(X0, X1, X2, X3, 11, K0,K1,K2, 3, 1, 2);
      THREEFISH_ENC_2_8_ROUNDS(X0, X1, X2, X3, 13, K2,K0,K1, 1, 2, 3);
      THREEFISH_ENC_2_8_ROUNDS(X0, X1, X2, X3, 15, K1,K2,K0, 2, 3, 1);
      THREEFISH_ENC_2_8_ROUNDS(X0, X1, X2, X3, 17, K0,K1,K2, 3, 1, 2);
      deinterleave_epi64(X0, X1);
      deinterleave_epi64(X2, X3);

      _mm256_storeu_si256(out_mm++, X0);
      _mm256_storeu_si256(out_mm++, X1);
      _mm256_storeu_si256(out_mm++, X2);
      _mm256_storeu_si256(out_mm++, X3);
      blocks -= 2;
      }

   for(size_t i = 0; i != blocks; ++i)
      {
      __m256i X0 = _mm256_loadu_si256(in_mm++);
      __m256i X1 = _mm256_loadu_si256(in_mm++);

      const __m256i T = _mm256_set_epi64x(T_64[0], T_64[1], T_64[2], 0);

      interleave_epi64(X0, X1);

      // Initial key whitening (subkey 0)
      THREEFISH_INJECT_KEY(X0, X1, 0, K1, K2, 2, 3);

      THREEFISH_ENC_8_ROUNDS(X0, X1,  1, K2,K0,K1, 1, 2, 3);
      THREEFISH_ENC_8_ROUNDS(X0, X1,  3, K1,K2,K0, 2, 3, 1);
      THREEFISH_ENC_8_ROUNDS(X0, X1,  5, K0,K1,K2, 3, 1, 2);
      THREEFISH_ENC_8_ROUNDS(X0, X1,  7, K2,K0,K1, 1, 2, 3);
      THREEFISH_ENC_8_ROUNDS(X0, X1,  9, K1,K2,K0, 2, 3, 1);
      THREEFISH_ENC_8_ROUNDS(X0, X1, 11, K0,K1,K2, 3, 1, 2);
      THREEFISH_ENC_8_ROUNDS(X0, X1, 13, K2,K0,K1, 1, 2, 3);
      THREEFISH_ENC_8_ROUNDS(X0, X1, 15, K1,K2,K0, 2, 3, 1);
      THREEFISH_ENC_8_ROUNDS(X0, X1, 17, K0,K1,K2, 3, 1, 2);

      deinterleave_epi64(X0, X1);

      _mm256_storeu_si256(out_mm++, X0);
      _mm256_storeu_si256(out_mm++, X1);
      }
#undef THREEFISH_ENC_8_ROUNDS
#undef THREEFISH_ROUND
#undef THREEFISH_INJECT_KEY
#undef THREEFISH_ENC_2_8_ROUNDS
#undef THREEFISH_ROUND_2
#undef THREEFISH_INJECT_KEY_2
   }
BOTAN_FUNC_ISA("avx2")
void Threefish_512::avx2_decrypt_n(const uint8_t in[], uint8_t out[], size_t blocks) const
   {
   const uint64_t* K = m_K.data();
   const uint64_t* T_64 = m_T.data();

   const __m256i ROTATE_1 = _mm256_set_epi64x(37,19,36,46);
   const __m256i ROTATE_2 = _mm256_set_epi64x(42,14,27,33);
   const __m256i ROTATE_3 = _mm256_set_epi64x(39,36,49,17);
   const __m256i ROTATE_4 = _mm256_set_epi64x(56,54, 9,44);
   const __m256i ROTATE_5 = _mm256_set_epi64x(24,34,30,39);
   const __m256i ROTATE_6 = _mm256_set_epi64x(17,10,50,13);
   const __m256i ROTATE_7 = _mm256_set_epi64x(43,39,29,25);
   const __m256i ROTATE_8 = _mm256_set_epi64x(22,56,35, 8);
#define THREEFISH_ROUND(X0, X1, SHR)                                                \
   do {                                                                             \
      const __m256i SHL = _mm256_sub_epi64(_mm256_set1_epi64x(64), SHR);            \
      X0 = _mm256_permute4x64_epi64(X0, _MM_SHUFFLE(2, 1, 0, 3));                   \
      X1 = _mm256_permute4x64_epi64(X1, _MM_SHUFFLE(1, 2, 3, 0));                   \
      X1 = _mm256_xor_si256(X1, X0);                                                \
      X1 = _mm256_or_si256(_mm256_sllv_epi64(X1, SHL), _mm256_srlv_epi64(X1, SHR)); \
      X0 = _mm256_sub_epi64(X0, X1);                                                \
   } while(0)
#define THREEFISH_ROUND_2(X0, X1, X2, X3, SHR)                                      \
   do {                                                                             \
      const __m256i SHL = _mm256_sub_epi64(_mm256_set1_epi64x(64), SHR);            \
      X0 = _mm256_permute4x64_epi64(X0, _MM_SHUFFLE(2, 1, 0, 3));                   \
      X2 = _mm256_permute4x64_epi64(X2, _MM_SHUFFLE(2, 1, 0, 3));                   \
      X1 = _mm256_permute4x64_epi64(X1, _MM_SHUFFLE(1, 2, 3, 0));                   \
      X3 = _mm256_permute4x64_epi64(X3, _MM_SHUFFLE(1, 2, 3, 0));                   \
      X1 = _mm256_xor_si256(X1, X0);                                                \
      X3 = _mm256_xor_si256(X3, X2);                                                \
      X1 = _mm256_or_si256(_mm256_sllv_epi64(X1, SHL), _mm256_srlv_epi64(X1, SHR)); \
      X3 = _mm256_or_si256(_mm256_sllv_epi64(X3, SHL), _mm256_srlv_epi64(X3, SHR)); \
      X0 = _mm256_sub_epi64(X0, X1);                                                \
      X2 = _mm256_sub_epi64(X2, X3);                                                \
   } while(0)
#define THREEFISH_INJECT_KEY(X0, X1, R, K0, K1, T0I, T1I)                        \
   do {                                                                          \
      const __m256i T0 = _mm256_permute4x64_epi64(T, _MM_SHUFFLE(T0I, 0, 0, 0)); \
      const __m256i T1 = _mm256_permute4x64_epi64(T, _MM_SHUFFLE(0, T1I, 0, 0)); \
      X0 = _mm256_sub_epi64(X0, K0);                                             \
      X1 = _mm256_sub_epi64(X1, K1);                                             \
      X1 = _mm256_sub_epi64(X1, _mm256_set_epi64x(R, 0, 0, 0));                  \
      X0 = _mm256_sub_epi64(X0, T0);                                             \
      X1 = _mm256_sub_epi64(X1, T1);                                             \
   } while(0)
#define THREEFISH_DEC_8_ROUNDS(X0, X1, R, K1, K2, K3, T0, T1, T2) \
   do {                                                           \
      THREEFISH_INJECT_KEY(X0, X1, R+1, K2, K3, T2, T0);          \
      THREEFISH_ROUND(X0, X1, ROTATE_8);                          \
      THREEFISH_ROUND(X0, X1, ROTATE_7);                          \
      THREEFISH_ROUND(X0, X1, ROTATE_6);                          \
      THREEFISH_ROUND(X0, X1, ROTATE_5);                          \
                                                                  \
      THREEFISH_INJECT_KEY(X0, X1, R, K1, K2, T0, T1);            \
      THREEFISH_ROUND(X0, X1, ROTATE_4);                          \
      THREEFISH_ROUND(X0, X1, ROTATE_3);                          \
      THREEFISH_ROUND(X0, X1, ROTATE_2);                          \
      THREEFISH_ROUND(X0, X1, ROTATE_1);                          \
   } while(0)
#define THREEFISH_INJECT_KEY_2(X0, X1, X2, X3, R, K0, K1, T0I, T1I)              \
   do {                                                                          \
      const __m256i T0 = _mm256_permute4x64_epi64(T, _MM_SHUFFLE(T0I, 0, 0, 0)); \
      __m256i T1 = _mm256_permute4x64_epi64(T, _MM_SHUFFLE(0, T1I, 0, 0));       \
      X0 = _mm256_sub_epi64(X0, K0);                                             \
      X2 = _mm256_sub_epi64(X2, K0);                                             \
      X1 = _mm256_sub_epi64(X1, K1);                                             \
      X3 = _mm256_sub_epi64(X3, K1);                                             \
      T1 = _mm256_add_epi64(T1, _mm256_set_epi64x(R,0,0,0));                     \
      X0 = _mm256_sub_epi64(X0, T0);                                             \
      X2 = _mm256_sub_epi64(X2, T0);                                             \
      X1 = _mm256_sub_epi64(X1, T1);                                             \
      X3 = _mm256_sub_epi64(X3, T1);                                             \
   } while(0)
#define THREEFISH_DEC_2_8_ROUNDS(X0, X1, X2, X3, R, K1, K2, K3, T0, T1, T2) \
   do {                                                                     \
      THREEFISH_INJECT_KEY_2(X0, X1, X2, X3, R+1, K2, K3, T2, T0);          \
      THREEFISH_ROUND_2(X0, X1, X2, X3, ROTATE_8);                          \
      THREEFISH_ROUND_2(X0, X1, X2, X3, ROTATE_7);                          \
      THREEFISH_ROUND_2(X0, X1, X2, X3, ROTATE_6);                          \
      THREEFISH_ROUND_2(X0, X1, X2, X3, ROTATE_5);                          \
                                                                            \
      THREEFISH_INJECT_KEY_2(X0, X1, X2, X3, R, K1, K2, T0, T1);            \
      THREEFISH_ROUND_2(X0, X1, X2, X3, ROTATE_4);                          \
      THREEFISH_ROUND_2(X0, X1, X2, X3, ROTATE_3);                          \
      THREEFISH_ROUND_2(X0, X1, X2, X3, ROTATE_2);                          \
      THREEFISH_ROUND_2(X0, X1, X2, X3, ROTATE_1);                          \
   } while(0)
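   // Decryption walks the key schedule backwards, so rather than rotating
   // three registers it precomputes all nine even/odd key-word combinations
   // up front.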
   const __m256i K0 = _mm256_set_epi64x(K[6], K[4], K[2], K[0]);
   const __m256i K1 = _mm256_set_epi64x(K[7], K[5], K[3], K[1]);
   const __m256i K2 = _mm256_set_epi64x(K[8], K[6], K[4], K[2]);
   const __m256i K3 = _mm256_set_epi64x(K[0], K[7], K[5], K[3]);
   const __m256i K4 = _mm256_set_epi64x(K[1], K[8], K[6], K[4]);
   const __m256i K5 = _mm256_set_epi64x(K[2], K[0], K[7], K[5]);
   const __m256i K6 = _mm256_set_epi64x(K[3], K[1], K[8], K[6]);
   const __m256i K7 = _mm256_set_epi64x(K[4], K[2], K[0], K[7]);
   const __m256i K8 = _mm256_set_epi64x(K[5], K[3], K[1], K[8]);
   const __m256i* in_mm = reinterpret_cast<const __m256i*>(in);
   __m256i* out_mm = reinterpret_cast<__m256i*>(out);

   while(blocks >= 2)
      {
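      // Same two-block structure as encryption, applying the subkeys in
      // reverse order.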
      __m256i X0 = _mm256_loadu_si256(in_mm++);
      __m256i X1 = _mm256_loadu_si256(in_mm++);
      __m256i X2 = _mm256_loadu_si256(in_mm++);
      __m256i X3 = _mm256_loadu_si256(in_mm++);

      const __m256i T = _mm256_set_epi64x(T_64[0], T_64[1], T_64[2], 0);

      interleave_epi64(X0, X1);
      interleave_epi64(X2, X3);
      THREEFISH_DEC_2_8_ROUNDS(X0, X1, X2, X3, 17, K8,K0,K1, 3, 1, 2);
      THREEFISH_DEC_2_8_ROUNDS(X0, X1, X2, X3, 15, K6,K7,K8, 2, 3, 1);
      THREEFISH_DEC_2_8_ROUNDS(X0, X1, X2, X3, 13, K4,K5,K6, 1, 2, 3);
      THREEFISH_DEC_2_8_ROUNDS(X0, X1, X2, X3, 11, K2,K3,K4, 3, 1, 2);
      THREEFISH_DEC_2_8_ROUNDS(X0, X1, X2, X3,  9, K0,K1,K2, 2, 3, 1);
      THREEFISH_DEC_2_8_ROUNDS(X0, X1, X2, X3,  7, K7,K8,K0, 1, 2, 3);
      THREEFISH_DEC_2_8_ROUNDS(X0, X1, X2, X3,  5, K5,K6,K7, 3, 1, 2);
      THREEFISH_DEC_2_8_ROUNDS(X0, X1, X2, X3,  3, K3,K4,K5, 2, 3, 1);
      THREEFISH_DEC_2_8_ROUNDS(X0, X1, X2, X3,  1, K1,K2,K3, 1, 2, 3);

      // Remove the initial key whitening (subkey 0)
      THREEFISH_INJECT_KEY_2(X0, X1, X2, X3, 0, K0, K1, 2, 3);
      deinterleave_epi64(X0, X1);
      deinterleave_epi64(X2, X3);

      _mm256_storeu_si256(out_mm++, X0);
      _mm256_storeu_si256(out_mm++, X1);
      _mm256_storeu_si256(out_mm++, X2);
      _mm256_storeu_si256(out_mm++, X3);
      blocks -= 2;
      }

   for(size_t i = 0; i != blocks; ++i)
      {
      __m256i X0 = _mm256_loadu_si256(in_mm++);
      __m256i X1 = _mm256_loadu_si256(in_mm++);

      const __m256i T = _mm256_set_epi64x(T_64[0], T_64[1], T_64[2], 0);

      interleave_epi64(X0, X1);

      THREEFISH_DEC_8_ROUNDS(X0, X1, 17, K8,K0,K1, 3, 1, 2);
      THREEFISH_DEC_8_ROUNDS(X0, X1, 15, K6,K7,K8, 2, 3, 1);
      THREEFISH_DEC_8_ROUNDS(X0, X1, 13, K4,K5,K6, 1, 2, 3);
      THREEFISH_DEC_8_ROUNDS(X0, X1, 11, K2,K3,K4, 3, 1, 2);
      THREEFISH_DEC_8_ROUNDS(X0, X1,  9, K0,K1,K2, 2, 3, 1);
      THREEFISH_DEC_8_ROUNDS(X0, X1,  7, K7,K8,K0, 1, 2, 3);
      THREEFISH_DEC_8_ROUNDS(X0, X1,  5, K5,K6,K7, 3, 1, 2);
      THREEFISH_DEC_8_ROUNDS(X0, X1,  3, K3,K4,K5, 2, 3, 1);
      THREEFISH_DEC_8_ROUNDS(X0, X1,  1, K1,K2,K3, 1, 2, 3);

      // Remove the initial key whitening (subkey 0)
      THREEFISH_INJECT_KEY(X0, X1, 0, K0, K1, 2, 3);

      deinterleave_epi64(X0, X1);

      _mm256_storeu_si256(out_mm++, X0);
      _mm256_storeu_si256(out_mm++, X1);
      }
#undef THREEFISH_DEC_8_ROUNDS
#undef THREEFISH_ROUND
#undef THREEFISH_INJECT_KEY
#undef THREEFISH_DEC_2_8_ROUNDS
#undef THREEFISH_ROUND_2
#undef THREEFISH_INJECT_KEY_2
   }

}