Botan 2.19.3
Crypto and TLS for C++11
aes_vperm.cpp
/*
* AES using vector permutes (SSSE3, NEON)
* (C) 2010,2016,2019 Jack Lloyd
*
* Based on public domain x86-64 assembly written by Mike Hamburg,
* described in "Accelerating AES with Vector Permute Instructions"
* (CHES 2009). His original code is available at
* https://crypto.stanford.edu/vpaes/
*
* Botan is released under the Simplified BSD License (see license.txt)
*/

#include <botan/aes.h>
#include <botan/internal/ct_utils.h>
#include <botan/internal/simd_32.h>

#if defined(BOTAN_SIMD_USE_SSE2)
  #include <tmmintrin.h>
#endif

namespace Botan {

namespace {

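/*
* Overview: the vector permute ("vpaes") technique evaluates the AES S-box
* with 16-entry lookup tables applied through byte-shuffle instructions, so
* there are no data-dependent table loads or branches.
*
* shuffle(a, b) performs a per-byte table lookup: for each byte lane i,
* roughly out[i] = (b[i] & 0x80) ? 0 : a[b[i] & 0x0F], matching the
* semantics of SSSE3 PSHUFB. The NEON and AltiVec paths emulate the same
* behaviour; AltiVec needs the explicit vec_sel to zero lanes whose index
* byte has its high bit set.
*/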
inline SIMD_4x32 BOTAN_FUNC_ISA(BOTAN_VPERM_ISA) shuffle(SIMD_4x32 a, SIMD_4x32 b)
   {
#if defined(BOTAN_SIMD_USE_SSE2)
   return SIMD_4x32(_mm_shuffle_epi8(a.raw(), b.raw()));
#elif defined(BOTAN_SIMD_USE_NEON)
   const uint8x16_t tbl = vreinterpretq_u8_u32(a.raw());
   const uint8x16_t idx = vreinterpretq_u8_u32(b.raw());

#if defined(BOTAN_TARGET_ARCH_IS_ARM32)
   const uint8x8x2_t tbl2 = { vget_low_u8(tbl), vget_high_u8(tbl) };

   return SIMD_4x32(vreinterpretq_u32_u8(
      vcombine_u8(vtbl2_u8(tbl2, vget_low_u8(idx)),
                  vtbl2_u8(tbl2, vget_high_u8(idx)))));

#else
   return SIMD_4x32(vreinterpretq_u32_u8(vqtbl1q_u8(tbl, idx)));
#endif

#elif defined(BOTAN_SIMD_USE_ALTIVEC)

   const auto zero = vec_splat_s8(0x00);
   const auto mask = vec_cmplt((__vector signed char)b.raw(), zero);
   const auto r = vec_perm((__vector signed char)a.raw(), (__vector signed char)a.raw(), (__vector unsigned char)b.raw());
   return SIMD_4x32((__vector unsigned int)vec_sel(r, zero, mask));

#else
   #error "No shuffle implementation available"
#endif
   }

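/*
* alignr8(a, b) concatenates b and a and returns bytes 8..23 of the pair,
* i.e. the upper 8 bytes of b followed by the lower 8 bytes of a. It is
* used by the AES-192 key schedule below.
*/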
inline SIMD_4x32 BOTAN_FUNC_ISA(BOTAN_VPERM_ISA) alignr8(SIMD_4x32 a, SIMD_4x32 b)
   {
#if defined(BOTAN_SIMD_USE_SSE2)
   return SIMD_4x32(_mm_alignr_epi8(a.raw(), b.raw(), 8));
#elif defined(BOTAN_SIMD_USE_NEON)
   return SIMD_4x32(vextq_u32(b.raw(), a.raw(), 2));
#elif defined(BOTAN_SIMD_USE_ALTIVEC)
   const __vector unsigned char mask = {8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23};
   return SIMD_4x32(vec_perm(b.raw(), a.raw(), mask));
#else
   #error "No alignr8 implementation available"
#endif
   }

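/*
* The constants below are the vpaes lookup tables from Hamburg's code:
* k_ipt1/k_ipt2 and k_dipt1/k_dipt2 are the input basis changes for
* encryption and decryption, k_inv1/k_inv2 drive the GF(2^4)-based field
* inversion, the sb1/sb2 and sb9/sbd/sbb/sbe tables produce the SubBytes and
* InvSubBytes outputs pre-multiplied by the MixColumns and InvMixColumns
* coefficients, sbou/sbot and sboud/sbotd add the output basis change used
* in the final round, mc_forward/mc_backward/mcx encode MixColumns as byte
* rotations, vperm_sr holds the accumulated ShiftRows permutations, and
* rcon holds the key schedule round constants in the form this
* implementation consumes them (hence they do not look like the familiar
* 0x01, 0x02, 0x04, ... values).
*/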
const SIMD_4x32 k_ipt1 = SIMD_4x32(0x5A2A7000, 0xC2B2E898, 0x52227808, 0xCABAE090);
const SIMD_4x32 k_ipt2 = SIMD_4x32(0x317C4D00, 0x4C01307D, 0xB0FDCC81, 0xCD80B1FC);

const SIMD_4x32 k_inv1 = SIMD_4x32(0x0D080180, 0x0E05060F, 0x0A0B0C02, 0x04070309);
const SIMD_4x32 k_inv2 = SIMD_4x32(0x0F0B0780, 0x01040A06, 0x02050809, 0x030D0E0C);

const SIMD_4x32 sb1u = SIMD_4x32(0xCB503E00, 0xB19BE18F, 0x142AF544, 0xA5DF7A6E);
const SIMD_4x32 sb1t = SIMD_4x32(0xFAE22300, 0x3618D415, 0x0D2ED9EF, 0x3BF7CCC1);
const SIMD_4x32 sbou = SIMD_4x32(0x6FBDC700, 0xD0D26D17, 0xC502A878, 0x15AABF7A);
const SIMD_4x32 sbot = SIMD_4x32(0x5FBB6A00, 0xCFE474A5, 0x412B35FA, 0x8E1E90D1);

const SIMD_4x32 sboud = SIMD_4x32(0x7EF94000, 0x1387EA53, 0xD4943E2D, 0xC7AA6DB9);
const SIMD_4x32 sbotd = SIMD_4x32(0x93441D00, 0x12D7560F, 0xD8C58E9C, 0xCA4B8159);

const SIMD_4x32 mc_forward[4] = {
   SIMD_4x32(0x00030201, 0x04070605, 0x080B0A09, 0x0C0F0E0D),
   SIMD_4x32(0x04070605, 0x080B0A09, 0x0C0F0E0D, 0x00030201),
   SIMD_4x32(0x080B0A09, 0x0C0F0E0D, 0x00030201, 0x04070605),
   SIMD_4x32(0x0C0F0E0D, 0x00030201, 0x04070605, 0x080B0A09)
};

const SIMD_4x32 vperm_sr[4] = {
   SIMD_4x32(0x03020100, 0x07060504, 0x0B0A0908, 0x0F0E0D0C),
   SIMD_4x32(0x0F0A0500, 0x030E0904, 0x07020D08, 0x0B06010C),
   SIMD_4x32(0x0B020900, 0x0F060D04, 0x030A0108, 0x070E050C),
   SIMD_4x32(0x070A0D00, 0x0B0E0104, 0x0F020508, 0x0306090C),
};

const SIMD_4x32 rcon[10] = {
   SIMD_4x32(0x00000070, 0x00000000, 0x00000000, 0x00000000),
   SIMD_4x32(0x0000002A, 0x00000000, 0x00000000, 0x00000000),
   SIMD_4x32(0x00000098, 0x00000000, 0x00000000, 0x00000000),
   SIMD_4x32(0x00000008, 0x00000000, 0x00000000, 0x00000000),
   SIMD_4x32(0x0000004D, 0x00000000, 0x00000000, 0x00000000),
   SIMD_4x32(0x0000007C, 0x00000000, 0x00000000, 0x00000000),
   SIMD_4x32(0x0000007D, 0x00000000, 0x00000000, 0x00000000),
   SIMD_4x32(0x00000081, 0x00000000, 0x00000000, 0x00000000),
   SIMD_4x32(0x0000001F, 0x00000000, 0x00000000, 0x00000000),
   SIMD_4x32(0x00000083, 0x00000000, 0x00000000, 0x00000000),
};

const SIMD_4x32 sb2u = SIMD_4x32(0x0B712400, 0xE27A93C6, 0xBC982FCD, 0x5EB7E955);
const SIMD_4x32 sb2t = SIMD_4x32(0x0AE12900, 0x69EB8840, 0xAB82234A, 0xC2A163C8);

const SIMD_4x32 k_dipt1 = SIMD_4x32(0x0B545F00, 0x0F505B04, 0x114E451A, 0x154A411E);
const SIMD_4x32 k_dipt2 = SIMD_4x32(0x60056500, 0x86E383E6, 0xF491F194, 0x12771772);

const SIMD_4x32 sb9u = SIMD_4x32(0x9A86D600, 0x851C0353, 0x4F994CC9, 0xCAD51F50);
const SIMD_4x32 sb9t = SIMD_4x32(0xECD74900, 0xC03B1789, 0xB2FBA565, 0x725E2C9E);

const SIMD_4x32 sbeu = SIMD_4x32(0x26D4D000, 0x46F29296, 0x64B4F6B0, 0x22426004);
const SIMD_4x32 sbet = SIMD_4x32(0xFFAAC100, 0x0C55A6CD, 0x98593E32, 0x9467F36B);

const SIMD_4x32 sbdu = SIMD_4x32(0xE6B1A200, 0x7D57CCDF, 0x882A4439, 0xF56E9B13);
const SIMD_4x32 sbdt = SIMD_4x32(0x24C6CB00, 0x3CE2FAF7, 0x15DEEFD3, 0x2931180D);

const SIMD_4x32 sbbu = SIMD_4x32(0x96B44200, 0xD0226492, 0xB0F2D404, 0x602646F6);
const SIMD_4x32 sbbt = SIMD_4x32(0xCD596700, 0xC19498A6, 0x3255AA6B, 0xF3FF0C3E);

const SIMD_4x32 mcx[4] = {
   SIMD_4x32(0x0C0F0E0D, 0x00030201, 0x04070605, 0x080B0A09),
   SIMD_4x32(0x080B0A09, 0x0C0F0E0D, 0x00030201, 0x04070605),
   SIMD_4x32(0x04070605, 0x080B0A09, 0x0C0F0E0D, 0x00030201),
   SIMD_4x32(0x00030201, 0x04070605, 0x080B0A09, 0x0C0F0E0D),
};

const SIMD_4x32 mc_backward[4] = {
   SIMD_4x32(0x02010003, 0x06050407, 0x0A09080B, 0x0E0D0C0F),
   SIMD_4x32(0x0E0D0C0F, 0x02010003, 0x06050407, 0x0A09080B),
   SIMD_4x32(0x0A09080B, 0x0E0D0C0F, 0x02010003, 0x06050407),
   SIMD_4x32(0x06050407, 0x0A09080B, 0x0E0D0C0F, 0x02010003),
};

const SIMD_4x32 lo_nibs_mask = SIMD_4x32::splat_u8(0x0F);

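/*
* Each byte is split into its low and high 4-bit halves, which are then used
* as indices into the 16-entry tables above via shuffle(). For example, the
* byte 0xB7 yields low_nibs -> 0x07 and high_nibs -> 0x0B in that lane.
*/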
inline SIMD_4x32 low_nibs(SIMD_4x32 x)
   {
   return lo_nibs_mask & x;
   }

inline SIMD_4x32 high_nibs(SIMD_4x32 x)
   {
   return (x.shr<4>() & lo_nibs_mask);
   }

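/*
* The first "round" is just AddRoundKey combined with the change of basis
* (k_ipt1/k_ipt2) that moves the block from the standard AES representation
* into the one used by the vector permute tables.
*/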
inline SIMD_4x32 BOTAN_FUNC_ISA(BOTAN_VPERM_ISA) aes_enc_first_round(SIMD_4x32 B, SIMD_4x32 K)
   {
   return shuffle(k_ipt1, low_nibs(B)) ^ shuffle(k_ipt2, high_nibs(B)) ^ K;
   }

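/*
* One full encryption round. The t2/t5/t6 sequence computes the field
* inversion at the heart of SubBytes using the 4-bit tables k_inv1/k_inv2;
* sb1u/sb1t and sb2u/sb2t then finish the S-box and premultiply by the
* MixColumns coefficients, and the shuffles through mc_forward/mc_backward
* apply ShiftRows and combine the columns. AddRoundKey is folded in via K.
*/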
inline SIMD_4x32 BOTAN_FUNC_ISA(BOTAN_VPERM_ISA) aes_enc_round(SIMD_4x32 B, SIMD_4x32 K, size_t r)
   {
   const SIMD_4x32 Bh = high_nibs(B);
   SIMD_4x32 Bl = low_nibs(B);
   const SIMD_4x32 t2 = shuffle(k_inv2, Bl);
   Bl ^= Bh;

   const SIMD_4x32 t5 = Bl ^ shuffle(k_inv1, t2 ^ shuffle(k_inv1, Bh));
   const SIMD_4x32 t6 = Bh ^ shuffle(k_inv1, t2 ^ shuffle(k_inv1, Bl));

   const SIMD_4x32 t7 = shuffle(sb1t, t6) ^ shuffle(sb1u, t5) ^ K;
   const SIMD_4x32 t8 = shuffle(sb2t, t6) ^ shuffle(sb2u, t5) ^ shuffle(t7, mc_forward[r % 4]);

   return shuffle(t8, mc_forward[r % 4]) ^ shuffle(t7, mc_backward[r % 4]) ^ t8;
   }

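/*
* The final round omits MixColumns. sbou/sbot include the output basis
* change back to the standard AES representation, and the vperm_sr shuffle
* applies the ShiftRows permutation accumulated over the preceding rounds.
*/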
inline SIMD_4x32 BOTAN_FUNC_ISA(BOTAN_VPERM_ISA) aes_enc_last_round(SIMD_4x32 B, SIMD_4x32 K, size_t r)
   {
   const SIMD_4x32 Bh = high_nibs(B);
   SIMD_4x32 Bl = low_nibs(B);
   const SIMD_4x32 t2 = shuffle(k_inv2, Bl);
   Bl ^= Bh;

   const SIMD_4x32 t5 = Bl ^ shuffle(k_inv1, t2 ^ shuffle(k_inv1, Bh));
   const SIMD_4x32 t6 = Bh ^ shuffle(k_inv1, t2 ^ shuffle(k_inv1, Bl));

   return shuffle(shuffle(sbou, t5) ^ shuffle(sbot, t6) ^ K, vperm_sr[r % 4]);
   }

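/*
* Decryption mirrors the structure above: k_dipt1/k_dipt2 provide the input
* basis change, the sb9/sbd/sbb/sbe tables give the inverse S-box output
* premultiplied by the InvMixColumns coefficients, and mcx supplies the
* rotation pattern in the order the inverse rounds need it.
*/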
inline SIMD_4x32 BOTAN_FUNC_ISA(BOTAN_VPERM_ISA) aes_dec_first_round(SIMD_4x32 B, SIMD_4x32 K)
   {
   return shuffle(k_dipt1, low_nibs(B)) ^ shuffle(k_dipt2, high_nibs(B)) ^ K;
   }

inline SIMD_4x32 BOTAN_FUNC_ISA(BOTAN_VPERM_ISA) aes_dec_round(SIMD_4x32 B, SIMD_4x32 K, size_t r)
   {
   const SIMD_4x32 Bh = high_nibs(B);
   B = low_nibs(B);
   const SIMD_4x32 t2 = shuffle(k_inv2, B);

   B ^= Bh;

   const SIMD_4x32 t5 = B ^ shuffle(k_inv1, t2 ^ shuffle(k_inv1, Bh));
   const SIMD_4x32 t6 = Bh ^ shuffle(k_inv1, t2 ^ shuffle(k_inv1, B));

   const SIMD_4x32 mc = mcx[(r-1)%4];

   const SIMD_4x32 t8 = shuffle(sb9t, t6) ^ shuffle(sb9u, t5) ^ K;
   const SIMD_4x32 t9 = shuffle(t8, mc) ^ shuffle(sbdu, t5) ^ shuffle(sbdt, t6);
   const SIMD_4x32 t12 = shuffle(t9, mc) ^ shuffle(sbbu, t5) ^ shuffle(sbbt, t6);
   return shuffle(t12, mc) ^ shuffle(sbeu, t5) ^ shuffle(sbet, t6);
   }

inline SIMD_4x32 BOTAN_FUNC_ISA(BOTAN_VPERM_ISA) aes_dec_last_round(SIMD_4x32 B, SIMD_4x32 K, size_t r)
   {
   const uint32_t which_sr = ((((r - 1) << 4) ^ 48) & 48) / 16;

   const SIMD_4x32 Bh = high_nibs(B);
   B = low_nibs(B);
   const SIMD_4x32 t2 = shuffle(k_inv2, B);

   B ^= Bh;

   const SIMD_4x32 t5 = B ^ shuffle(k_inv1, t2 ^ shuffle(k_inv1, Bh));
   const SIMD_4x32 t6 = Bh ^ shuffle(k_inv1, t2 ^ shuffle(k_inv1, B));

   const SIMD_4x32 x = shuffle(sboud, t5) ^ shuffle(sbotd, t6) ^ K;
   return shuffle(x, vperm_sr[which_sr]);
   }

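/*
* The block loops below process two blocks per iteration so the two
* independent dependency chains can overlap in the SIMD pipeline; a single
* trailing block is handled separately. CT::poison/CT::unpoison mark the
* input as secret for constant-time analysis tooling and are no-ops unless
* the library is built with such instrumentation enabled.
*/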
void BOTAN_FUNC_ISA(BOTAN_VPERM_ISA)
   vperm_encrypt_blocks(const uint8_t in[], uint8_t out[], size_t blocks,
                        const SIMD_4x32 K[], size_t rounds)
   {
   CT::poison(in, blocks * 16);

   const size_t blocks2 = blocks - (blocks % 2);

   for(size_t i = 0; i != blocks2; i += 2)
      {
      SIMD_4x32 B0 = SIMD_4x32::load_le(in + i*16);
      SIMD_4x32 B1 = SIMD_4x32::load_le(in + (i+1)*16);

      B0 = aes_enc_first_round(B0, K[0]);
      B1 = aes_enc_first_round(B1, K[0]);

      for(size_t r = 1; r != rounds; ++r)
         {
         B0 = aes_enc_round(B0, K[r], r);
         B1 = aes_enc_round(B1, K[r], r);
         }

      B0 = aes_enc_last_round(B0, K[rounds], rounds);
      B1 = aes_enc_last_round(B1, K[rounds], rounds);

      B0.store_le(out + i*16);
      B1.store_le(out + (i+1)*16);
      }

   for(size_t i = blocks2; i < blocks; ++i)
      {
      SIMD_4x32 B = SIMD_4x32::load_le(in + i*16); // any remaining single block

      B = aes_enc_first_round(B, K[0]);

      for(size_t r = 1; r != rounds; ++r)
         {
         B = aes_enc_round(B, K[r], r);
         }

      B = aes_enc_last_round(B, K[rounds], rounds);
      B.store_le(out + i*16);
      }

   CT::unpoison(in, blocks * 16);
   CT::unpoison(out, blocks * 16);
   }

void BOTAN_FUNC_ISA(BOTAN_VPERM_ISA)
   vperm_decrypt_blocks(const uint8_t in[], uint8_t out[], size_t blocks,
                        const SIMD_4x32 K[], size_t rounds)
   {
   CT::poison(in, blocks * 16);

   const size_t blocks2 = blocks - (blocks % 2);

   for(size_t i = 0; i != blocks2; i += 2)
      {
      SIMD_4x32 B0 = SIMD_4x32::load_le(in + i*16);
      SIMD_4x32 B1 = SIMD_4x32::load_le(in + (i+1)*16);

      B0 = aes_dec_first_round(B0, K[0]);
      B1 = aes_dec_first_round(B1, K[0]);

      for(size_t r = 1; r != rounds; ++r)
         {
         B0 = aes_dec_round(B0, K[r], r);
         B1 = aes_dec_round(B1, K[r], r);
         }

      B0 = aes_dec_last_round(B0, K[rounds], rounds);
      B1 = aes_dec_last_round(B1, K[rounds], rounds);

      B0.store_le(out + i*16);
      B1.store_le(out + (i+1)*16);
      }

   for(size_t i = blocks2; i < blocks; ++i)
      {
      SIMD_4x32 B = SIMD_4x32::load_le(in + i*16); // any remaining single block

      B = aes_dec_first_round(B, K[0]);

      for(size_t r = 1; r != rounds; ++r)
         {
         B = aes_dec_round(B, K[r], r);
         }

      B = aes_dec_last_round(B, K[rounds], rounds);
      B.store_le(out + i*16);
      }

   CT::unpoison(in, blocks * 16);
   CT::unpoison(out, blocks * 16);
   }

}

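/*
* The per-key-size entry points below load the scheduled round keys from
* m_EK/m_DK into SIMD registers and call the generic block loops with the
* appropriate round count (10, 12 or 14). Applications do not call them
* directly; a sketch of how they are reached through the public BlockCipher
* interface, assuming a build where the vperm code path is the one selected
* at runtime:
*
*   Botan::AES_128 aes;
*   aes.set_key(key, 16);            // runs vperm_key_schedule
*   aes.encrypt_n(in, out, blocks);  // dispatches to vperm_encrypt_n
*/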
void AES_128::vperm_encrypt_n(const uint8_t in[], uint8_t out[], size_t blocks) const
   {
   const SIMD_4x32 K[11] = {
      SIMD_4x32(&m_EK[4* 0]), SIMD_4x32(&m_EK[4* 1]), SIMD_4x32(&m_EK[4* 2]),
      SIMD_4x32(&m_EK[4* 3]), SIMD_4x32(&m_EK[4* 4]), SIMD_4x32(&m_EK[4* 5]),
      SIMD_4x32(&m_EK[4* 6]), SIMD_4x32(&m_EK[4* 7]), SIMD_4x32(&m_EK[4* 8]),
      SIMD_4x32(&m_EK[4* 9]), SIMD_4x32(&m_EK[4*10]),
      };

   return vperm_encrypt_blocks(in, out, blocks, K, 10);
   }

void AES_128::vperm_decrypt_n(const uint8_t in[], uint8_t out[], size_t blocks) const
   {
   const SIMD_4x32 K[11] = {
      SIMD_4x32(&m_DK[4* 0]), SIMD_4x32(&m_DK[4* 1]), SIMD_4x32(&m_DK[4* 2]),
      SIMD_4x32(&m_DK[4* 3]), SIMD_4x32(&m_DK[4* 4]), SIMD_4x32(&m_DK[4* 5]),
      SIMD_4x32(&m_DK[4* 6]), SIMD_4x32(&m_DK[4* 7]), SIMD_4x32(&m_DK[4* 8]),
      SIMD_4x32(&m_DK[4* 9]), SIMD_4x32(&m_DK[4*10]),
      };

   return vperm_decrypt_blocks(in, out, blocks, K, 10);
   }

void AES_192::vperm_encrypt_n(const uint8_t in[], uint8_t out[], size_t blocks) const
   {
   const SIMD_4x32 K[13] = {
      SIMD_4x32(&m_EK[4* 0]), SIMD_4x32(&m_EK[4* 1]), SIMD_4x32(&m_EK[4* 2]),
      SIMD_4x32(&m_EK[4* 3]), SIMD_4x32(&m_EK[4* 4]), SIMD_4x32(&m_EK[4* 5]),
      SIMD_4x32(&m_EK[4* 6]), SIMD_4x32(&m_EK[4* 7]), SIMD_4x32(&m_EK[4* 8]),
      SIMD_4x32(&m_EK[4* 9]), SIMD_4x32(&m_EK[4*10]), SIMD_4x32(&m_EK[4*11]),
      SIMD_4x32(&m_EK[4*12]),
      };

   return vperm_encrypt_blocks(in, out, blocks, K, 12);
   }

void AES_192::vperm_decrypt_n(const uint8_t in[], uint8_t out[], size_t blocks) const
   {
   const SIMD_4x32 K[13] = {
      SIMD_4x32(&m_DK[4* 0]), SIMD_4x32(&m_DK[4* 1]), SIMD_4x32(&m_DK[4* 2]),
      SIMD_4x32(&m_DK[4* 3]), SIMD_4x32(&m_DK[4* 4]), SIMD_4x32(&m_DK[4* 5]),
      SIMD_4x32(&m_DK[4* 6]), SIMD_4x32(&m_DK[4* 7]), SIMD_4x32(&m_DK[4* 8]),
      SIMD_4x32(&m_DK[4* 9]), SIMD_4x32(&m_DK[4*10]), SIMD_4x32(&m_DK[4*11]),
      SIMD_4x32(&m_DK[4*12]),
      };

   return vperm_decrypt_blocks(in, out, blocks, K, 12);
   }

void AES_256::vperm_encrypt_n(const uint8_t in[], uint8_t out[], size_t blocks) const
   {
   const SIMD_4x32 K[15] = {
      SIMD_4x32(&m_EK[4* 0]), SIMD_4x32(&m_EK[4* 1]), SIMD_4x32(&m_EK[4* 2]),
      SIMD_4x32(&m_EK[4* 3]), SIMD_4x32(&m_EK[4* 4]), SIMD_4x32(&m_EK[4* 5]),
      SIMD_4x32(&m_EK[4* 6]), SIMD_4x32(&m_EK[4* 7]), SIMD_4x32(&m_EK[4* 8]),
      SIMD_4x32(&m_EK[4* 9]), SIMD_4x32(&m_EK[4*10]), SIMD_4x32(&m_EK[4*11]),
      SIMD_4x32(&m_EK[4*12]), SIMD_4x32(&m_EK[4*13]), SIMD_4x32(&m_EK[4*14]),
      };

   return vperm_encrypt_blocks(in, out, blocks, K, 14);
   }

void AES_256::vperm_decrypt_n(const uint8_t in[], uint8_t out[], size_t blocks) const
   {
   const SIMD_4x32 K[15] = {
      SIMD_4x32(&m_DK[4* 0]), SIMD_4x32(&m_DK[4* 1]), SIMD_4x32(&m_DK[4* 2]),
      SIMD_4x32(&m_DK[4* 3]), SIMD_4x32(&m_DK[4* 4]), SIMD_4x32(&m_DK[4* 5]),
      SIMD_4x32(&m_DK[4* 6]), SIMD_4x32(&m_DK[4* 7]), SIMD_4x32(&m_DK[4* 8]),
      SIMD_4x32(&m_DK[4* 9]), SIMD_4x32(&m_DK[4*10]), SIMD_4x32(&m_DK[4*11]),
      SIMD_4x32(&m_DK[4*12]), SIMD_4x32(&m_DK[4*13]), SIMD_4x32(&m_DK[4*14]),
      };

   return vperm_decrypt_blocks(in, out, blocks, K, 14);
   }

namespace {

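/*
* Key schedule helpers. Round keys are stored pre-transformed into the table
* basis and "mangled": aes_schedule_mangle xors in the 0x5B output constant
* and rotates the key so that it lines up with the ShiftRows state of the
* round that will consume it, while aes_schedule_mangle_dec produces the
* corresponding decryption round key by running it through the dsk tables.
*/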
inline SIMD_4x32 BOTAN_FUNC_ISA(BOTAN_VPERM_ISA)
   aes_schedule_transform(SIMD_4x32 input,
                          SIMD_4x32 table_1,
                          SIMD_4x32 table_2)
   {
   return shuffle(table_1, low_nibs(input)) ^ shuffle(table_2, high_nibs(input));
   }

SIMD_4x32 BOTAN_FUNC_ISA(BOTAN_VPERM_ISA) aes_schedule_mangle(SIMD_4x32 k, uint8_t round_no)
   {
   const SIMD_4x32 mc_forward0(0x00030201, 0x04070605, 0x080B0A09, 0x0C0F0E0D);

   SIMD_4x32 t = shuffle(k ^ SIMD_4x32::splat_u8(0x5B), mc_forward0);
   SIMD_4x32 t2 = t;
   t = shuffle(t, mc_forward0);
   t2 = t ^ t2 ^ shuffle(t, mc_forward0);
   return shuffle(t2, vperm_sr[round_no % 4]);
   }

SIMD_4x32 BOTAN_FUNC_ISA(BOTAN_VPERM_ISA) aes_schedule_mangle_dec(SIMD_4x32 k, uint8_t round_no)
   {
   const SIMD_4x32 mc_forward0(0x00030201, 0x04070605, 0x080B0A09, 0x0C0F0E0D);

   const SIMD_4x32 dsk[8] = {
      SIMD_4x32(0x7ED9A700, 0xB6116FC8, 0x82255BFC, 0x4AED9334),
      SIMD_4x32(0x27143300, 0x45765162, 0xE9DAFDCE, 0x8BB89FAC),
      SIMD_4x32(0xCCA86400, 0x27438FEB, 0xADC90561, 0x4622EE8A),
      SIMD_4x32(0x4F92DD00, 0x815C13CE, 0xBD602FF2, 0x73AEE13C),
      SIMD_4x32(0x01C6C700, 0x03C4C502, 0xFA3D3CFB, 0xF83F3EF9),
      SIMD_4x32(0x38CFF700, 0xEE1921D6, 0x7384BC4B, 0xA5526A9D),
      SIMD_4x32(0x53732000, 0xE3C390B0, 0x10306343, 0xA080D3F3),
      SIMD_4x32(0x036982E8, 0xA0CA214B, 0x8CE60D67, 0x2F45AEC4),
      };

   SIMD_4x32 t = aes_schedule_transform(k, dsk[0], dsk[1]);
   SIMD_4x32 output = shuffle(t, mc_forward0);

   t = aes_schedule_transform(t, dsk[2], dsk[3]);
   output = shuffle(t ^ output, mc_forward0);

   t = aes_schedule_transform(t, dsk[4], dsk[5]);
   output = shuffle(t ^ output, mc_forward0);

   t = aes_schedule_transform(t, dsk[6], dsk[7]);
   output = shuffle(t ^ output, mc_forward0);

   return shuffle(output, vperm_sr[round_no % 4]);
   }

SIMD_4x32 BOTAN_FUNC_ISA(BOTAN_VPERM_ISA) aes_schedule_mangle_last(SIMD_4x32 k, uint8_t round_no)
   {
   const SIMD_4x32 out_tr1(0xD6B66000, 0xFF9F4929, 0xDEBE6808, 0xF7974121);
   const SIMD_4x32 out_tr2(0x50BCEC00, 0x01EDBD51, 0xB05C0CE0, 0xE10D5DB1);

   k = shuffle(k, vperm_sr[round_no % 4]);
   k ^= SIMD_4x32::splat_u8(0x5B);
   return aes_schedule_transform(k, out_tr1, out_tr2);
   }

SIMD_4x32 BOTAN_FUNC_ISA(BOTAN_VPERM_ISA) aes_schedule_mangle_last_dec(SIMD_4x32 k)
   {
   const SIMD_4x32 deskew1(0x47A4E300, 0x07E4A340, 0x5DBEF91A, 0x1DFEB95A);
   const SIMD_4x32 deskew2(0x83EA6900, 0x5F36B5DC, 0xF49D1E77, 0x2841C2AB);

   k ^= SIMD_4x32::splat_u8(0x5B);
   return aes_schedule_transform(k, deskew1, deskew2);
   }

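/*
* aes_schedule_round performs one expansion step of the AES key schedule in
* the transformed basis. The two-argument form applies SubWord to input1 via
* the same inversion tables used by the encryption rounds and "smears"
* input2, reproducing the usual w[i] = w[i-1] ^ w[i-4] chaining; the 0x5B
* constant compensates for the basis change. The three-argument form first
* rotates the last word of input1 (RotWord) and xors in the round constant,
* then defers to the two-argument form.
*/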
SIMD_4x32 BOTAN_FUNC_ISA(BOTAN_VPERM_ISA) aes_schedule_round(SIMD_4x32 input1, SIMD_4x32 input2)
   {
   SIMD_4x32 smeared = input2 ^ input2.shift_elems_left<1>();
   smeared ^= smeared.shift_elems_left<2>();
   smeared ^= SIMD_4x32::splat_u8(0x5B);

   const SIMD_4x32 Bh = high_nibs(input1);
   SIMD_4x32 Bl = low_nibs(input1);

   const SIMD_4x32 t2 = shuffle(k_inv2, Bl);

   Bl ^= Bh;

   SIMD_4x32 t5 = Bl ^ shuffle(k_inv1, t2 ^ shuffle(k_inv1, Bh));
   SIMD_4x32 t6 = Bh ^ shuffle(k_inv1, t2 ^ shuffle(k_inv1, Bl));

   return smeared ^ shuffle(sb1u, t5) ^ shuffle(sb1t, t6);
   }

SIMD_4x32 BOTAN_FUNC_ISA(BOTAN_VPERM_ISA) aes_schedule_round(SIMD_4x32 rc, SIMD_4x32 input1, SIMD_4x32 input2)
   {
   // This byte shuffle is equivalent to alignr<1>(shuffle32(input1, (3,3,3,3)));
   const SIMD_4x32 shuffle3333_15 = SIMD_4x32::splat(0x0C0F0E0D);
   return aes_schedule_round(shuffle(input1, shuffle3333_15), input2 ^ rc);
   }

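/*
* AES-192 produces six new 32-bit words per expansion step while round keys
* are stored four words at a time, so aes_schedule_192_smear folds the
* leftover half of one step into the next by masking y and xoring in
* selected words of x and y.
*/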
SIMD_4x32 BOTAN_FUNC_ISA(BOTAN_VPERM_ISA) aes_schedule_192_smear(SIMD_4x32 x, SIMD_4x32 y)
   {
   const SIMD_4x32 shuffle3332 =
      SIMD_4x32(0x0B0A0908, 0x0F0E0D0C, 0x0F0E0D0C, 0x0F0E0D0C);
   const SIMD_4x32 shuffle2000 =
      SIMD_4x32(0x03020100, 0x03020100, 0x03020100, 0x0B0A0908);

   const SIMD_4x32 zero_top_half(0, 0, 0xFFFFFFFF, 0xFFFFFFFF);
   y &= zero_top_half;
   return y ^ shuffle(x, shuffle3332) ^ shuffle(y, shuffle2000);
   }

}

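/*
* The key schedules below produce both the encryption keys (m_EK) and the
* decryption keys (m_DK) in a single pass. Decryption keys are written in
* reverse order, and the alternating (i % 4)-style indices passed to the
* mangle helpers keep each stored key aligned with the ShiftRows rotation of
* the round that will use it.
*/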
void AES_128::vperm_key_schedule(const uint8_t keyb[], size_t)
   {
   m_EK.resize(11*4);
   m_DK.resize(11*4);

   SIMD_4x32 key = SIMD_4x32::load_le(keyb);

   shuffle(key, vperm_sr[2]).store_le(&m_DK[4*10]);

   key = aes_schedule_transform(key, k_ipt1, k_ipt2);
   key.store_le(&m_EK[0]);

   for(size_t i = 1; i != 10; ++i)
      {
      key = aes_schedule_round(rcon[i-1], key, key);

      aes_schedule_mangle(key, (12-i) % 4).store_le(&m_EK[4*i]);

      aes_schedule_mangle_dec(key, (10-i)%4).store_le(&m_DK[4*(10-i)]);
      }

   key = aes_schedule_round(rcon[9], key, key);
   aes_schedule_mangle_last(key, 2).store_le(&m_EK[4*10]);
   aes_schedule_mangle_last_dec(key).store_le(&m_DK[0]);
   }

void AES_192::vperm_key_schedule(const uint8_t keyb[], size_t)
   {
   m_EK.resize(13*4);
   m_DK.resize(13*4);

   SIMD_4x32 key1 = SIMD_4x32::load_le(keyb);
   SIMD_4x32 key2 = SIMD_4x32::load_le(keyb + 8);

   shuffle(key1, vperm_sr[0]).store_le(&m_DK[12*4]);

   key1 = aes_schedule_transform(key1, k_ipt1, k_ipt2);
   key2 = aes_schedule_transform(key2, k_ipt1, k_ipt2);

   key1.store_le(&m_EK[0]);

   for(size_t i = 0; i != 4; ++i)
      {
      // key2 with 8 high bytes masked off
      SIMD_4x32 t = key2;
      key2 = aes_schedule_round(rcon[2*i], key2, key1);
      const SIMD_4x32 key2t = alignr8(key2, t);
      aes_schedule_mangle(key2t, (i+3)%4).store_le(&m_EK[4*(3*i+1)]);
      aes_schedule_mangle_dec(key2t, (i+3)%4).store_le(&m_DK[4*(11-3*i)]);

      t = aes_schedule_192_smear(key2, t);

      aes_schedule_mangle(t, (i+2)%4).store_le(&m_EK[4*(3*i+2)]);
      aes_schedule_mangle_dec(t, (i+2)%4).store_le(&m_DK[4*(10-3*i)]);

      key2 = aes_schedule_round(rcon[2*i+1], t, key2);

      if(i == 3)
         {
         aes_schedule_mangle_last(key2, (i+1)%4).store_le(&m_EK[4*(3*i+3)]);
         aes_schedule_mangle_last_dec(key2).store_le(&m_DK[4*(9-3*i)]);
         }
      else
         {
         aes_schedule_mangle(key2, (i+1)%4).store_le(&m_EK[4*(3*i+3)]);
         aes_schedule_mangle_dec(key2, (i+1)%4).store_le(&m_DK[4*(9-3*i)]);
         }

      key1 = key2;
      key2 = aes_schedule_192_smear(key2, t);
      }
   }

void AES_256::vperm_key_schedule(const uint8_t keyb[], size_t)
   {
   m_EK.resize(15*4);
   m_DK.resize(15*4);

   SIMD_4x32 key1 = SIMD_4x32::load_le(keyb);
   SIMD_4x32 key2 = SIMD_4x32::load_le(keyb + 16);

   shuffle(key1, vperm_sr[2]).store_le(&m_DK[4*14]);

   key1 = aes_schedule_transform(key1, k_ipt1, k_ipt2);
   key2 = aes_schedule_transform(key2, k_ipt1, k_ipt2);

   key1.store_le(&m_EK[0]);
   aes_schedule_mangle(key2, 3).store_le(&m_EK[4]);

   aes_schedule_mangle_dec(key2, 1).store_le(&m_DK[4*13]);

   const SIMD_4x32 shuffle3333 = SIMD_4x32::splat(0x0F0E0D0C);

   for(size_t i = 2; i != 14; i += 2)
      {
      const SIMD_4x32 k_t = key2;
      key1 = key2 = aes_schedule_round(rcon[(i/2)-1], key2, key1);

      aes_schedule_mangle(key2, i % 4).store_le(&m_EK[4*i]);
      aes_schedule_mangle_dec(key2, (i+2)%4).store_le(&m_DK[4*(14-i)]);

      key2 = aes_schedule_round(shuffle(key2, shuffle3333), k_t);

      aes_schedule_mangle(key2, (i-1)%4).store_le(&m_EK[4*(i+1)]);
      aes_schedule_mangle_dec(key2, (i+1)%4).store_le(&m_DK[4*(13-i)]);
      }

   key2 = aes_schedule_round(rcon[6], key2, key1);

   aes_schedule_mangle_last(key2, 2).store_le(&m_EK[4*14]);
   aes_schedule_mangle_last_dec(key2).store_le(&m_DK[0]);
   }

}