aes.c

/*
 * copyright (c) 2007 Michael Niedermayer <michaelni@gmx.at>
 *
 * some optimization ideas from aes128.c by Reimar Doeffinger
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <string.h>

#include "config.h"
#include "aes.h"
#include "aes_internal.h"
#include "attributes.h"
#include "error.h"
#include "intreadwrite.h"
#include "macros.h"
#include "mem.h"
#include "thread.h"

const int av_aes_size = sizeof(AVAES);

struct AVAES *av_aes_alloc(void)
{
    return av_mallocz(sizeof(struct AVAES));
}

static const uint8_t rcon[10] = {
    0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36
};
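
/* S-box, inverse S-box and the MixColumns multiplication tables.
 * These are not constant data; they are generated once at runtime by
 * aes_init_static() below. */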
static uint8_t     sbox[256];
static uint8_t inv_sbox[256];

#if CONFIG_SMALL
static uint32_t enc_multbl[1][256];
static uint32_t dec_multbl[1][256];
#else
static uint32_t enc_multbl[4][256];
static uint32_t dec_multbl[4][256];
#endif

#if HAVE_BIGENDIAN
#   define ROT(x, s) (((x) >> (s)) | ((x) << (32-(s))))
#else
#   define ROT(x, s) (((x) << (s)) | ((x) >> (32-(s))))
#endif
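
/* AddRoundKey helpers: XOR a 128-bit round key into the state.
 * addkey() works on aligned av_aes_block operands, while addkey_s()
 * reads its source and addkey_d() writes its destination through
 * possibly unaligned byte pointers. */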
static inline void addkey(av_aes_block *dst, const av_aes_block *src,
                          const av_aes_block *round_key)
{
    dst->u64[0] = src->u64[0] ^ round_key->u64[0];
    dst->u64[1] = src->u64[1] ^ round_key->u64[1];
}

static inline void addkey_s(av_aes_block *dst, const uint8_t *src,
                            const av_aes_block *round_key)
{
    dst->u64[0] = AV_RN64(src)     ^ round_key->u64[0];
    dst->u64[1] = AV_RN64(src + 8) ^ round_key->u64[1];
}

static inline void addkey_d(uint8_t *dst, const av_aes_block *src,
                            const av_aes_block *round_key)
{
    AV_WN64(dst,     src->u64[0] ^ round_key->u64[0]);
    AV_WN64(dst + 8, src->u64[1] ^ round_key->u64[1]);
}
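
/* Combined SubBytes + ShiftRows (or their inverses, depending on which
 * S-box is passed in): each byte of s0[1] is looked up in box[] and
 * stored row-shifted into s0[0]. The parameter s selects the shift
 * direction (s = 2 for encryption, s = 0 for decryption, see aes_crypt()). */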
static void subshift(av_aes_block s0[2], int s, const uint8_t *box)
{
    unsigned char *s1_dst       = (unsigned char*)s0[0].u8 + 3 - s;
    const unsigned char *s1_src = s1_dst + sizeof(*s0);
    unsigned char *s3_dst       = (unsigned char*)s0[0].u8 + s + 1;
    const unsigned char *s3_src = s3_dst + sizeof(*s0);

    s0[0].u8[ 0] = box[s0[1].u8[ 0]];
    s0[0].u8[ 4] = box[s0[1].u8[ 4]];
    s0[0].u8[ 8] = box[s0[1].u8[ 8]];
    s0[0].u8[12] = box[s0[1].u8[12]];
    s1_dst[ 0]   = box[s1_src[ 4]];
    s1_dst[ 4]   = box[s1_src[ 8]];
    s1_dst[ 8]   = box[s1_src[12]];
    s1_dst[12]   = box[s1_src[ 0]];
    s0[0].u8[ 2] = box[s0[1].u8[10]];
    s0[0].u8[10] = box[s0[1].u8[ 2]];
    s0[0].u8[ 6] = box[s0[1].u8[14]];
    s0[0].u8[14] = box[s0[1].u8[ 6]];
    s3_dst[ 0]   = box[s3_src[12]];
    s3_dst[12]   = box[s3_src[ 8]];
    s3_dst[ 8]   = box[s3_src[ 4]];
    s3_dst[ 4]   = box[s3_src[ 0]];
}
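
/* MixColumns (or InvMixColumns) of one column via the precomputed
 * multiplication tables. With CONFIG_SMALL only one 256-entry table is
 * kept per direction and the other three lookups are derived from it by
 * rotation. */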
static inline int mix_core(uint32_t multbl[][256], int a, int b, int c, int d)
{
#if CONFIG_SMALL
    return multbl[0][a] ^ ROT(multbl[0][b], 8) ^ ROT(multbl[0][c], 16) ^ ROT(multbl[0][d], 24);
#else
    return multbl[0][a] ^ multbl[1][b] ^ multbl[2][c] ^ multbl[3][d];
#endif
}

static inline void mix(av_aes_block state[2], uint32_t multbl[][256], int s1, int s3)
{
    uint8_t (*src)[4] = state[1].u8x4;

    state[0].u32[0] = mix_core(multbl, src[0][0], src[s1    ][1], src[2][2], src[s3    ][3]);
    state[0].u32[1] = mix_core(multbl, src[1][0], src[s3 - 1][1], src[3][2], src[s1 - 1][3]);
    state[0].u32[2] = mix_core(multbl, src[2][0], src[s3    ][1], src[0][2], src[s1    ][3]);
    state[0].u32[3] = mix_core(multbl, src[3][0], src[s1 - 1][1], src[1][2], src[s3 - 1][3]);
}
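
/* Core of one block operation. The callers perform the initial
 * AddRoundKey via addkey_s() and the final one via addkey_d(); this
 * runs the rounds-1 middle rounds (SubBytes, ShiftRows and MixColumns
 * folded into the table lookups, then AddRoundKey) and finishes with
 * the last round's SubBytes + ShiftRows. */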
static inline void aes_crypt(AVAES *a, int s, const uint8_t *sbox,
                             uint32_t multbl[][256])
{
    int r;

    for (r = a->rounds - 1; r > 0; r--) {
        mix(a->state, multbl, 3 - s, 1 + s);
        addkey(&a->state[1], &a->state[0], &a->round_key[r]);
    }

    subshift(&a->state[0], s, sbox);
}
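
/* Encrypt or decrypt count 16-byte blocks. A non-NULL iv selects CBC
 * mode (the iv buffer is updated in place); otherwise the blocks are
 * processed independently (ECB). */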
static void aes_encrypt(AVAES *a, uint8_t *dst, const uint8_t *src,
                        int count, uint8_t *iv, int rounds)
{
    while (count--) {
        addkey_s(&a->state[1], src, &a->round_key[rounds]);
        if (iv)
            addkey_s(&a->state[1], iv, &a->state[1]);
        aes_crypt(a, 2, sbox, enc_multbl);
        addkey_d(dst, &a->state[0], &a->round_key[0]);
        if (iv)
            memcpy(iv, dst, 16);
        src += 16;
        dst += 16;
    }
}

static void aes_decrypt(AVAES *a, uint8_t *dst, const uint8_t *src,
                        int count, uint8_t *iv, int rounds)
{
    while (count--) {
        addkey_s(&a->state[1], src, &a->round_key[rounds]);
        aes_crypt(a, 0, inv_sbox, dec_multbl);
        if (iv) {
            addkey_s(&a->state[0], iv, &a->state[0]);
            memcpy(iv, src, 16);
        }
        addkey_d(dst, &a->state[0], &a->round_key[0]);
        src += 16;
        dst += 16;
    }
}
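
/* Public entry point: count is the number of 16-byte blocks; iv, if not
 * NULL, enables CBC mode. Whether this encrypts or decrypts was fixed by
 * av_aes_init(); the decrypt argument is not used by this implementation. */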
void av_aes_crypt(AVAES *a, uint8_t *dst, const uint8_t *src,
                  int count, uint8_t *iv, int decrypt)
{
    a->crypt(a, dst, src, count, iv, a->rounds);
}
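
/* Fill one set of multiplication tables: for each byte value, the S-box
 * output is multiplied in GF(2^8) (via the log/antilog tables) by the
 * four MixColumns coefficients c[] and packed into one 32-bit word;
 * without CONFIG_SMALL the three rotated copies are stored as well. */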
static void init_multbl2(uint32_t tbl[][256], const int c[4],
                         const uint8_t *log8, const uint8_t *alog8,
                         const uint8_t *sbox)
{
    int i;

    for (i = 0; i < 256; i++) {
        int x = sbox[i];
        if (x) {
            int k, l, m, n;
            x = log8[x];
            k = alog8[x + log8[c[0]]];
            l = alog8[x + log8[c[1]]];
            m = alog8[x + log8[c[2]]];
            n = alog8[x + log8[c[3]]];
            tbl[0][i] = AV_NE(MKBETAG(k, l, m, n), MKTAG(k, l, m, n));
#if !CONFIG_SMALL
            tbl[1][i] = ROT(tbl[0][i], 8);
            tbl[2][i] = ROT(tbl[0][i], 16);
            tbl[3][i] = ROT(tbl[0][i], 24);
#endif
        }
    }
}

static AVOnce aes_static_init = AV_ONCE_INIT;
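
/* One-time table setup: build GF(2^8) log/antilog tables for generator 3,
 * derive the S-box (multiplicative inverse followed by the affine
 * transform with constant 0x63) and its inverse, then generate the
 * encryption and decryption multiplication tables. */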
static av_cold void aes_init_static(void)
{
    uint8_t log8[256];
    uint8_t alog8[512];
    int i, j = 1;

    for (i = 0; i < 255; i++) {
        alog8[i] = alog8[i + 255] = j;
        log8[j]  = i;
        j ^= j + j;
        if (j > 255)
            j ^= 0x11B;
    }

    for (i = 0; i < 256; i++) {
        j = i ? alog8[255 - log8[i]] : 0;
        j ^= (j << 1) ^ (j << 2) ^ (j << 3) ^ (j << 4);
        j  = (j ^ (j >> 8) ^ 99) & 255;
        inv_sbox[j] = i;
        sbox[i]     = j;
    }

    init_multbl2(dec_multbl, (const int[4]) { 0xe, 0x9, 0xd, 0xb },
                 log8, alog8, inv_sbox);
    init_multbl2(enc_multbl, (const int[4]) { 0x2, 0x1, 0x1, 0x3 },
                 log8, alog8, sbox);
}

// this is based on the reference AES code by Paulo Barreto and Vincent Rijmen
int av_aes_init(AVAES *a, const uint8_t *key, int key_bits, int decrypt)
{
    int i, j, t, rconpointer = 0;
    uint8_t tk[8][4];
    int KC = key_bits >> 5;
    int rounds = KC + 6;

    a->rounds = rounds;
    a->crypt  = decrypt ? aes_decrypt : aes_encrypt;
#if ARCH_X86
    ff_init_aes_x86(a, decrypt);
#endif

    ff_thread_once(&aes_static_init, aes_init_static);

    if (key_bits != 128 && key_bits != 192 && key_bits != 256)
        return AVERROR(EINVAL);

    memcpy(tk, key, KC * 4);
    memcpy(a->round_key[0].u8, key, KC * 4);
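
    /* Rijndael key expansion: each KC-word block of round key material is
     * derived from the previous one. The first word gets RotWord/SubWord
     * plus the round constant; for 256-bit keys (KC == 8) the middle word
     * additionally gets SubWord applied. */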
    for (t = KC * 4; t < (rounds + 1) * 16; t += KC * 4) {
        for (i = 0; i < 4; i++)
            tk[0][i] ^= sbox[tk[KC - 1][(i + 1) & 3]];
        tk[0][0] ^= rcon[rconpointer++];

        for (j = 1; j < KC; j++) {
            if (KC != 8 || j != KC >> 1)
                for (i = 0; i < 4; i++)
                    tk[j][i] ^= tk[j - 1][i];
            else
                for (i = 0; i < 4; i++)
                    tk[j][i] ^= sbox[tk[j - 1][i]];
        }

        memcpy((unsigned char*)a->round_key + t, tk, KC * 4);
    }
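
    /* For decryption, run every middle round key through InvMixColumns so
     * the decryption rounds can reuse the same structure as encryption
     * (the forward S-box applied by subshift() cancels against the
     * InvSubBytes folded into dec_multbl). For encryption, just reverse
     * the round key order, since aes_crypt() walks the keys from
     * round_key[rounds] down to round_key[0]. */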
    if (decrypt) {
        for (i = 1; i < rounds; i++) {
            av_aes_block tmp[3];
            tmp[2] = a->round_key[i];
            subshift(&tmp[1], 0, sbox);
            mix(tmp, dec_multbl, 1, 3);
            a->round_key[i] = tmp[0];
        }
    } else {
        for (i = 0; i < (rounds + 1) >> 1; i++)
            FFSWAP(av_aes_block, a->round_key[i], a->round_key[rounds - i]);
    }

    return 0;
}
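
/* Minimal usage sketch (illustrative only, not part of this file);
 * dst, src and count are assumed to be provided by the caller:
 *
 *   AVAES *aes = av_aes_alloc();
 *   uint8_t key[16] = { 0 };   // 128-bit key; 192 and 256 bits also work
 *   uint8_t iv[16]  = { 0 };   // pass NULL instead of iv for ECB mode
 *   if (aes && av_aes_init(aes, key, 128, 0) >= 0)
 *       av_aes_crypt(aes, dst, src, count, iv, 0);  // count 16-byte blocks, CBC
 *   av_free(aes);
 */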