Thu, 22 Jan 2015 13:21:57 +0100
Incorporate requested changes from Mozilla in review:
https://bugzilla.mozilla.org/show_bug.cgi?id=1123480#c6
# LICENSE:
# This submission to NSS is to be made available under the terms of the
# Mozilla Public License, v. 2.0. You can obtain one at
# http://mozilla.org/MPL/2.0/.
################################################################################
# Copyright(c) 2012, Intel Corp.

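# Constants used throughout this file:
#   .Lone, .Ltwo   - 128-bit integers 1 and 2, used to step the counter blocks
#   .Lbswap_mask   - byte-reversal mask; the GHASH state and counters are kept byte-reversed
#                    in registers
#   .Lshuff_mask   - broadcast-of-byte-15 shuffle mask, used while deriving H' in
#                    intel_aes_gcmINIT
#   .Lpoly         - reduction constant for the GHASH polynomial x^128 + x^7 + x^2 + x + 1,
#                    in the bit-reflected form expected by the PCLMULQDQ-based reduction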
.align 16
.Lone:
    .quad 1,0
.Ltwo:
    .quad 2,0
.Lbswap_mask:
    .byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0
.Lshuff_mask:
    .quad 0x0f0f0f0f0f0f0f0f, 0x0f0f0f0f0f0f0f0f
.Lpoly:
    .quad 0x1, 0xc200000000000000


################################################################################
# Generates the final GCM tag
# void intel_aes_gcmTAG(uint8_t Htbl[16*16], uint8_t *Tp, uint64_t Mlen, uint64_t Alen, uint8_t* X0, uint8_t* TAG);
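# The tag is formed as in the GCM spec: Mlen and Alen (byte counts) are converted to bit
# counts and folded into the GHASH state loaded from Tp, the result is multiplied by H
# (Htbl[0]) and byte-swapped back, then xored with the 16-byte block at X0 (which the caller
# is expected to pass as the encrypted initial counter block) and stored at TAG.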
.type intel_aes_gcmTAG,@function
.globl intel_aes_gcmTAG
.align 16
intel_aes_gcmTAG:

    .set Htbl, %rdi
    .set Tp, %rsi
    .set Mlen, %rdx
    .set Alen, %rcx
    .set X0, %r8
    .set TAG, %r9

    .set T, %xmm0
    .set TMP0, %xmm1

    vmovdqu (Tp), T
    vpshufb .Lbswap_mask(%rip), T, T
    vpxor TMP0, TMP0, TMP0
    shl $3, Mlen
    shl $3, Alen
    vpinsrq $0, Mlen, TMP0, TMP0
    vpinsrq $1, Alen, TMP0, TMP0
    vpxor TMP0, T, T
    vmovdqu (Htbl), TMP0
    call GFMUL
    vpshufb .Lbswap_mask(%rip), T, T
    vpxor (X0), T, T
    vmovdqu T, (TAG)

    ret
.size intel_aes_gcmTAG, .-intel_aes_gcmTAG
################################################################################
# Generates the H table
# void intel_aes_gcmINIT(uint8_t Htbl[16*16], uint8_t *KS, int NR);
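# The hash key is derived as H = AES_K(0^128): T starts out as round key 0 (0 xor RK0) and is
# run through the remaining rounds of the schedule at KS. H is then byte-reversed and doubled
# into the "H * 2" representation, and the powers H^1..H^8 (times 2) are stored at
# Htbl[0..127], followed by per-power Karatsuba helper values at Htbl[128..255].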
.type intel_aes_gcmINIT,@function
.globl intel_aes_gcmINIT
.align 16
intel_aes_gcmINIT:

    .set Htbl, %rdi
    .set KS, %rsi
    .set NR, %edx

    .set T, %xmm0
    .set TMP0, %xmm1

CALCULATE_POWERS_OF_H:
    vmovdqu 16*0(KS), T
    vaesenc 16*1(KS), T, T
    vaesenc 16*2(KS), T, T
    vaesenc 16*3(KS), T, T
    vaesenc 16*4(KS), T, T
    vaesenc 16*5(KS), T, T
    vaesenc 16*6(KS), T, T
    vaesenc 16*7(KS), T, T
    vaesenc 16*8(KS), T, T
    vaesenc 16*9(KS), T, T
    vmovdqu 16*10(KS), TMP0
    cmp $10, NR
    je .LH0done
    vaesenc 16*10(KS), T, T
    vaesenc 16*11(KS), T, T
    vmovdqu 16*12(KS), TMP0
    cmp $12, NR
    je .LH0done
    vaesenc 16*12(KS), T, T
    vaesenc 16*13(KS), T, T
    vmovdqu 16*14(KS), TMP0

.LH0done:
    vaesenclast TMP0, T, T

    vpshufb .Lbswap_mask(%rip), T, T

    vmovdqu T, TMP0
    # Calculate H` = GFMUL(H, 2)
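    # The next block builds an all-ones mask when the top bit of H is set (all-zeros
    # otherwise), ANDs it with .Lpoly, and xors the result into H<<1, so the reduction
    # polynomial is folded in exactly when the shift carries a bit out of the 128-bit value.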
    vpsrld $7, T, %xmm3
    vmovdqu .Lshuff_mask(%rip), %xmm4
    vpshufb %xmm4, %xmm3, %xmm3
    movq $0xff00, %rax
    vmovq %rax, %xmm4
    vpshufb %xmm3, %xmm4, %xmm4
    vmovdqu .Lpoly(%rip), %xmm5
    vpand %xmm4, %xmm5, %xmm5
    vpsrld $31, T, %xmm3
    vpslld $1, T, %xmm4
    vpslldq $4, %xmm3, %xmm3
    vpxor %xmm3, %xmm4, T          # T now holds p(x)<<1

    # add (xor) the conditional polynomial term in %xmm5 into p(x)<<1
    vpxor %xmm5, T, T
    vmovdqu T, TMP0
    vmovdqu T, (Htbl)       # H * 2
    call GFMUL
    vmovdqu T, 16(Htbl)     # H^2 * 2
    call GFMUL
    vmovdqu T, 32(Htbl)     # H^3 * 2
    call GFMUL
    vmovdqu T, 48(Htbl)     # H^4 * 2
    call GFMUL
    vmovdqu T, 64(Htbl)     # H^5 * 2
    call GFMUL
    vmovdqu T, 80(Htbl)     # H^6 * 2
    call GFMUL
    vmovdqu T, 96(Htbl)     # H^7 * 2
    call GFMUL
    vmovdqu T, 112(Htbl)    # H^8 * 2

    # Precalculations for the aggregated ("reduce 4") hashing step: for each H^i store the
    # xor of its high and low 64-bit halves at Htbl[128 + 16*(i-1)]; these feed the Karatsuba
    # middle products.
    vpshufd $78, (Htbl), %xmm8
    vpshufd $78, 16(Htbl), %xmm9
    vpshufd $78, 32(Htbl), %xmm10
    vpshufd $78, 48(Htbl), %xmm11
    vpshufd $78, 64(Htbl), %xmm12
    vpshufd $78, 80(Htbl), %xmm13
    vpshufd $78, 96(Htbl), %xmm14
    vpshufd $78, 112(Htbl), %xmm15

    vpxor (Htbl), %xmm8, %xmm8
    vpxor 16(Htbl), %xmm9, %xmm9
    vpxor 32(Htbl), %xmm10, %xmm10
    vpxor 48(Htbl), %xmm11, %xmm11
    vpxor 64(Htbl), %xmm12, %xmm12
    vpxor 80(Htbl), %xmm13, %xmm13
    vpxor 96(Htbl), %xmm14, %xmm14
    vpxor 112(Htbl), %xmm15, %xmm15

    vmovdqu %xmm8, 128(Htbl)
    vmovdqu %xmm9, 144(Htbl)
    vmovdqu %xmm10, 160(Htbl)
    vmovdqu %xmm11, 176(Htbl)
    vmovdqu %xmm12, 192(Htbl)
    vmovdqu %xmm13, 208(Htbl)
    vmovdqu %xmm14, 224(Htbl)
    vmovdqu %xmm15, 240(Htbl)

    ret
.size intel_aes_gcmINIT, .-intel_aes_gcmINIT
################################################################################
# Authenticate only
# void intel_aes_gcmAAD(uint8_t Htbl[16*16], uint8_t *AAD, uint64_t Alen, uint8_t *Tp);
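# Alen is consumed in whole 16-byte blocks (the caller is expected to pad the AAD to a block
# boundary). Blocks are hashed eight at a time with the aggregated Karatsuba scheme, one
# reduction per group of eight; a prefix of (Alen/16 mod 8) blocks is hashed first so the
# main loop always sees full groups.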
.globl intel_aes_gcmAAD
.type intel_aes_gcmAAD,@function
.align 16
intel_aes_gcmAAD:

    .set DATA, %xmm0
    .set T, %xmm1
    .set BSWAP_MASK, %xmm2
    .set TMP0, %xmm3
    .set TMP1, %xmm4
    .set TMP2, %xmm5
    .set TMP3, %xmm6
    .set TMP4, %xmm7
    .set Xhi, %xmm9

    .set Htbl, %rdi
    .set inp, %rsi
    .set len, %rdx
    .set Tp, %rcx

    .set hlp0, %r11

.macro KARATSUBA_AAD i
    vpclmulqdq $0x00, 16*\i(Htbl), DATA, TMP3
    vpxor TMP3, TMP0, TMP0
    vpclmulqdq $0x11, 16*\i(Htbl), DATA, TMP3
    vpxor TMP3, TMP1, TMP1
    vpshufd $78, DATA, TMP3
    vpxor DATA, TMP3, TMP3
    vpclmulqdq $0x00, 16*(\i+8)(Htbl), TMP3, TMP3
    vpxor TMP3, TMP2, TMP2
.endm
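# KARATSUBA_AAD (defined above) xor-accumulates one block's contribution to the aggregated
# hash:
#   TMP0 ^= lo(DATA) * lo(H^(i+1))
#   TMP1 ^= hi(DATA) * hi(H^(i+1))
#   TMP2 ^= (hi(DATA) ^ lo(DATA)) * (hi(H^(i+1)) ^ lo(H^(i+1)))  (second factor precomputed at Htbl+128)
# The three accumulators are recombined and reduced once per group of eight blocks.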
    test len, len
    jnz .LbeginAAD
    ret

.LbeginAAD:

    push hlp0
    vzeroupper

    vmovdqa .Lbswap_mask(%rip), BSWAP_MASK

    vpxor Xhi, Xhi, Xhi

    vmovdqu (Tp), T
    vpshufb BSWAP_MASK, T, T

    # We hash eight blocks per iteration; if the total number of blocks is not a multiple of
    # eight, the first n%8 blocks are hashed first.
    mov len, hlp0
    and $~-128, hlp0

    jz .Lmod_loop

    sub hlp0, len
    sub $16, hlp0

    # hash the first prefix block
    vmovdqu (inp), DATA
    vpshufb BSWAP_MASK, DATA, DATA
    vpxor T, DATA, DATA

    vpclmulqdq $0x00, (Htbl, hlp0), DATA, TMP0
    vpclmulqdq $0x11, (Htbl, hlp0), DATA, TMP1
    vpshufd $78, DATA, TMP2
    vpxor DATA, TMP2, TMP2
    vpclmulqdq $0x00, 16*8(Htbl, hlp0), TMP2, TMP2

    lea 16(inp), inp
    test hlp0, hlp0
    jnz .Lpre_loop
    jmp .Lred1

    # hash the remaining prefix blocks (up to 7 prefix blocks in total)
.align 64
.Lpre_loop:

    sub $16, hlp0

    vmovdqu (inp), DATA              # next data block
    vpshufb BSWAP_MASK, DATA, DATA

    vpclmulqdq $0x00, (Htbl, hlp0), DATA, TMP3
    vpxor TMP3, TMP0, TMP0
    vpclmulqdq $0x11, (Htbl, hlp0), DATA, TMP3
    vpxor TMP3, TMP1, TMP1
    vpshufd $78, DATA, TMP3
    vpxor DATA, TMP3, TMP3
    vpclmulqdq $0x00, 16*8(Htbl, hlp0), TMP3, TMP3
    vpxor TMP3, TMP2, TMP2

    test hlp0, hlp0

    lea 16(inp), inp

    jnz .Lpre_loop

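    # Combine the three Karatsuba accumulators: fold low and high into the middle term, then
    # split the middle across the 128-bit boundary so (Xhi:T) holds the unreduced 256-bit
    # product.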
.Lred1:
    vpxor TMP0, TMP2, TMP2
    vpxor TMP1, TMP2, TMP2
    vpsrldq $8, TMP2, TMP3
    vpslldq $8, TMP2, TMP2

    vpxor TMP3, TMP1, Xhi
    vpxor TMP2, TMP0, T

.align 64
.Lmod_loop:
    sub $0x80, len
    jb .Ldone

    vmovdqu 16*7(inp), DATA          # Ii
    vpshufb BSWAP_MASK, DATA, DATA

    vpclmulqdq $0x00, (Htbl), DATA, TMP0
    vpclmulqdq $0x11, (Htbl), DATA, TMP1
    vpshufd $78, DATA, TMP2
    vpxor DATA, TMP2, TMP2
    vpclmulqdq $0x00, 16*8(Htbl), TMP2, TMP2
    #########################################################
    vmovdqu 16*6(inp), DATA
    vpshufb BSWAP_MASK, DATA, DATA
    KARATSUBA_AAD 1
    #########################################################
    vmovdqu 16*5(inp), DATA
    vpshufb BSWAP_MASK, DATA, DATA

    vpclmulqdq $0x10, .Lpoly(%rip), T, TMP4    # reduction stage 1a
    vpalignr $8, T, T, T

    KARATSUBA_AAD 2

    vpxor TMP4, T, T                           # reduction stage 1b
    #########################################################
    vmovdqu 16*4(inp), DATA
    vpshufb BSWAP_MASK, DATA, DATA

    KARATSUBA_AAD 3
    #########################################################
    vmovdqu 16*3(inp), DATA
    vpshufb BSWAP_MASK, DATA, DATA

    vpclmulqdq $0x10, .Lpoly(%rip), T, TMP4    # reduction stage 2a
    vpalignr $8, T, T, T

    KARATSUBA_AAD 4

    vpxor TMP4, T, T                           # reduction stage 2b
    #########################################################
    vmovdqu 16*2(inp), DATA
    vpshufb BSWAP_MASK, DATA, DATA

    KARATSUBA_AAD 5

    vpxor Xhi, T, T                            # reduction finalize
    #########################################################
    vmovdqu 16*1(inp), DATA
    vpshufb BSWAP_MASK, DATA, DATA

    KARATSUBA_AAD 6
    #########################################################
    vmovdqu 16*0(inp), DATA
    vpshufb BSWAP_MASK, DATA, DATA
    vpxor T, DATA, DATA

    KARATSUBA_AAD 7
    #########################################################
    vpxor TMP0, TMP2, TMP2                     # karatsuba fixup
    vpxor TMP1, TMP2, TMP2
    vpsrldq $8, TMP2, TMP3
    vpslldq $8, TMP2, TMP2

    vpxor TMP3, TMP1, Xhi
    vpxor TMP2, TMP0, T

    lea 16*8(inp), inp
    jmp .Lmod_loop
    #########################################################

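    # Final reduction: two pclmulqdq folds against .Lpoly bring the 256-bit (Xhi:T) product
    # back to a 128-bit GHASH state.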
.Ldone:
    vpclmulqdq $0x10, .Lpoly(%rip), T, TMP3
    vpalignr $8, T, T, T
    vpxor TMP3, T, T

    vpclmulqdq $0x10, .Lpoly(%rip), T, TMP3
    vpalignr $8, T, T, T
    vpxor TMP3, T, T

    vpxor Xhi, T, T

.Lsave:
    vpshufb BSWAP_MASK, T, T
    vmovdqu T, (Tp)
    vzeroupper

    pop hlp0
    ret
.size intel_aes_gcmAAD,.-intel_aes_gcmAAD

################################################################################
# Encrypt and Authenticate
# void intel_aes_gcmENC(uint8_t* PT, uint8_t* CT, void *Gctx, uint64_t len);
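# Note on the register bindings below: Htbl and Gctx both name %rdx. The powers-of-H table
# appears to sit at the start of the context, so the same pointer serves both uses; the GHASH
# state and the counter live at context offsets 272 and 288, and offset 304 holds a pointer
# to the AES key schedule (see .Lbegin).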
.type intel_aes_gcmENC,@function
.globl intel_aes_gcmENC
.align 16
intel_aes_gcmENC:

    .set PT, %rdi
    .set CT, %rsi
    .set Htbl, %rdx
    .set len, %rcx
    .set KS, %r9
    .set NR, %r10d

    .set Gctx, %rdx

    .set T, %xmm0
    .set TMP0, %xmm1
    .set TMP1, %xmm2
    .set TMP2, %xmm3
    .set TMP3, %xmm4
    .set TMP4, %xmm5
    .set TMP5, %xmm6
    .set CTR0, %xmm7
    .set CTR1, %xmm8
    .set CTR2, %xmm9
    .set CTR3, %xmm10
    .set CTR4, %xmm11
    .set CTR5, %xmm12
    .set CTR6, %xmm13
    .set CTR7, %xmm14
    .set CTR, %xmm15

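# ROUND i: apply AES round i (round key at i*16(KS)) to all eight counter blocks.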
.macro ROUND i
    vmovdqu \i*16(KS), TMP3
    vaesenc TMP3, CTR0, CTR0
    vaesenc TMP3, CTR1, CTR1
    vaesenc TMP3, CTR2, CTR2
    vaesenc TMP3, CTR3, CTR3
    vaesenc TMP3, CTR4, CTR4
    vaesenc TMP3, CTR5, CTR5
    vaesenc TMP3, CTR6, CTR6
    vaesenc TMP3, CTR7, CTR7
.endm

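# ROUNDMUL i: the same AES round as ROUND, interleaved with one Karatsuba GHASH step on the
# saved ciphertext block at i*16(%rsp); it xor-accumulates into TMP0 (middle), TMP1 (high)
# and TMP2 (low).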
.macro ROUNDMUL i

    vmovdqu \i*16(%rsp), TMP5
    vmovdqu \i*16(KS), TMP3

    vaesenc TMP3, CTR0, CTR0
    vaesenc TMP3, CTR1, CTR1
    vaesenc TMP3, CTR2, CTR2
    vaesenc TMP3, CTR3, CTR3

    vpshufd $78, TMP5, TMP4
    vpxor TMP5, TMP4, TMP4

    vaesenc TMP3, CTR4, CTR4
    vaesenc TMP3, CTR5, CTR5
    vaesenc TMP3, CTR6, CTR6
    vaesenc TMP3, CTR7, CTR7

    vpclmulqdq $0x00, 128+\i*16(Htbl), TMP4, TMP3
    vpxor TMP3, TMP0, TMP0
    vmovdqa \i*16(Htbl), TMP4
    vpclmulqdq $0x11, TMP4, TMP5, TMP3
    vpxor TMP3, TMP1, TMP1
    vpclmulqdq $0x00, TMP4, TMP5, TMP3
    vpxor TMP3, TMP2, TMP2

.endm

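# KARATSUBA i: the GHASH-only variant of ROUNDMUL, used for the last group of eight blocks
# once no further AES rounds need to be interleaved (see .LEndOctets).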
.macro KARATSUBA i
    vmovdqu \i*16(%rsp), TMP5

    vpclmulqdq $0x11, 16*\i(Htbl), TMP5, TMP3
    vpxor TMP3, TMP1, TMP1
    vpclmulqdq $0x00, 16*\i(Htbl), TMP5, TMP3
    vpxor TMP3, TMP2, TMP2
    vpshufd $78, TMP5, TMP3
    vpxor TMP5, TMP3, TMP5
    vpclmulqdq $0x00, 128+\i*16(Htbl), TMP5, TMP3
    vpxor TMP3, TMP0, TMP0
.endm

    test len, len
    jnz .Lbegin
    ret

.Lbegin:

    vzeroupper
    push %rbp
    push %rbx

    movq %rsp, %rbp
    sub $128, %rsp
    andq $-16, %rsp

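    # A 128-byte, 16-byte-aligned scratch area on the stack holds the previous group of eight
    # ciphertext blocks while they are hashed. The context supplies the counter (offset 288),
    # the GHASH state (offset 272) and a pointer to the key schedule (offset 304); the round
    # count is read from offset 4 of that structure and the round keys start at offset 48.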
    vmovdqu 288(Gctx), CTR
    vmovdqu 272(Gctx), T
    mov 304(Gctx), KS
    mov 4(KS), NR
    lea 48(KS), KS

    vpshufb .Lbswap_mask(%rip), CTR, CTR
    vpshufb .Lbswap_mask(%rip), T, T

    cmp $128, len
    jb .LDataSingles

    # Encrypt the first eight blocks
    sub $128, len
    vmovdqa CTR, CTR0
    vpaddd .Lone(%rip), CTR0, CTR1
    vpaddd .Ltwo(%rip), CTR0, CTR2
    vpaddd .Lone(%rip), CTR2, CTR3
    vpaddd .Ltwo(%rip), CTR2, CTR4
    vpaddd .Lone(%rip), CTR4, CTR5
    vpaddd .Ltwo(%rip), CTR4, CTR6
    vpaddd .Lone(%rip), CTR6, CTR7
    vpaddd .Ltwo(%rip), CTR6, CTR

    vpshufb .Lbswap_mask(%rip), CTR0, CTR0
    vpshufb .Lbswap_mask(%rip), CTR1, CTR1
    vpshufb .Lbswap_mask(%rip), CTR2, CTR2
    vpshufb .Lbswap_mask(%rip), CTR3, CTR3
    vpshufb .Lbswap_mask(%rip), CTR4, CTR4
    vpshufb .Lbswap_mask(%rip), CTR5, CTR5
    vpshufb .Lbswap_mask(%rip), CTR6, CTR6
    vpshufb .Lbswap_mask(%rip), CTR7, CTR7

    vpxor (KS), CTR0, CTR0
    vpxor (KS), CTR1, CTR1
    vpxor (KS), CTR2, CTR2
    vpxor (KS), CTR3, CTR3
    vpxor (KS), CTR4, CTR4
    vpxor (KS), CTR5, CTR5
    vpxor (KS), CTR6, CTR6
    vpxor (KS), CTR7, CTR7

    ROUND 1
    ROUND 2
    ROUND 3
    ROUND 4
    ROUND 5
    ROUND 6
    ROUND 7
    ROUND 8
    ROUND 9

    vmovdqu 160(KS), TMP5
    cmp $12, NR
    jb .LLast1

    ROUND 10
    ROUND 11

    vmovdqu 192(KS), TMP5
    cmp $14, NR
    jb .LLast1

    ROUND 12
    ROUND 13

    vmovdqu 224(KS), TMP5

.LLast1:

    vpxor (PT), TMP5, TMP3
    vaesenclast TMP3, CTR0, CTR0
    vpxor 16(PT), TMP5, TMP3
    vaesenclast TMP3, CTR1, CTR1
    vpxor 32(PT), TMP5, TMP3
    vaesenclast TMP3, CTR2, CTR2
    vpxor 48(PT), TMP5, TMP3
    vaesenclast TMP3, CTR3, CTR3
    vpxor 64(PT), TMP5, TMP3
    vaesenclast TMP3, CTR4, CTR4
    vpxor 80(PT), TMP5, TMP3
    vaesenclast TMP3, CTR5, CTR5
    vpxor 96(PT), TMP5, TMP3
    vaesenclast TMP3, CTR6, CTR6
    vpxor 112(PT), TMP5, TMP3
    vaesenclast TMP3, CTR7, CTR7

    vmovdqu .Lbswap_mask(%rip), TMP3

    vmovdqu CTR0, (CT)
    vpshufb TMP3, CTR0, CTR0
    vmovdqu CTR1, 16(CT)
    vpshufb TMP3, CTR1, CTR1
    vmovdqu CTR2, 32(CT)
    vpshufb TMP3, CTR2, CTR2
    vmovdqu CTR3, 48(CT)
    vpshufb TMP3, CTR3, CTR3
    vmovdqu CTR4, 64(CT)
    vpshufb TMP3, CTR4, CTR4
    vmovdqu CTR5, 80(CT)
    vpshufb TMP3, CTR5, CTR5
    vmovdqu CTR6, 96(CT)
    vpshufb TMP3, CTR6, CTR6
    vmovdqu CTR7, 112(CT)
    vpshufb TMP3, CTR7, CTR7

    lea 128(CT), CT
    lea 128(PT), PT
    jmp .LDataOctets

# Encrypt 8 blocks each time while hashing previous 8 blocks
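# The GHASH of each group is folded into the AES rounds of the next group (ROUNDMUL), so the
# multiply and reduction latency is hidden behind the encryption of the following eight
# blocks.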
.align 64
.LDataOctets:
    cmp $128, len
    jb .LEndOctets
    sub $128, len

    vmovdqa CTR7, TMP5
    vmovdqa CTR6, 1*16(%rsp)
    vmovdqa CTR5, 2*16(%rsp)
    vmovdqa CTR4, 3*16(%rsp)
    vmovdqa CTR3, 4*16(%rsp)
    vmovdqa CTR2, 5*16(%rsp)
    vmovdqa CTR1, 6*16(%rsp)
    vmovdqa CTR0, 7*16(%rsp)

    vmovdqa CTR, CTR0
    vpaddd .Lone(%rip), CTR0, CTR1
    vpaddd .Ltwo(%rip), CTR0, CTR2
    vpaddd .Lone(%rip), CTR2, CTR3
    vpaddd .Ltwo(%rip), CTR2, CTR4
    vpaddd .Lone(%rip), CTR4, CTR5
    vpaddd .Ltwo(%rip), CTR4, CTR6
    vpaddd .Lone(%rip), CTR6, CTR7
    vpaddd .Ltwo(%rip), CTR6, CTR

    vmovdqu (KS), TMP4
    vpshufb TMP3, CTR0, CTR0
    vpxor TMP4, CTR0, CTR0
    vpshufb TMP3, CTR1, CTR1
    vpxor TMP4, CTR1, CTR1
    vpshufb TMP3, CTR2, CTR2
    vpxor TMP4, CTR2, CTR2
    vpshufb TMP3, CTR3, CTR3
    vpxor TMP4, CTR3, CTR3
    vpshufb TMP3, CTR4, CTR4
    vpxor TMP4, CTR4, CTR4
    vpshufb TMP3, CTR5, CTR5
    vpxor TMP4, CTR5, CTR5
    vpshufb TMP3, CTR6, CTR6
    vpxor TMP4, CTR6, CTR6
    vpshufb TMP3, CTR7, CTR7
    vpxor TMP4, CTR7, CTR7

    vmovdqu 16*0(Htbl), TMP3
    vpclmulqdq $0x11, TMP3, TMP5, TMP1
    vpclmulqdq $0x00, TMP3, TMP5, TMP2
    vpshufd $78, TMP5, TMP3
    vpxor TMP5, TMP3, TMP5
    vmovdqu 128+0*16(Htbl), TMP3
    vpclmulqdq $0x00, TMP3, TMP5, TMP0

    ROUNDMUL 1

    ROUNDMUL 2

    ROUNDMUL 3

    ROUNDMUL 4

    ROUNDMUL 5

    ROUNDMUL 6

    vpxor 7*16(%rsp), T, TMP5
    vmovdqu 7*16(KS), TMP3

    vaesenc TMP3, CTR0, CTR0
    vaesenc TMP3, CTR1, CTR1
    vaesenc TMP3, CTR2, CTR2
    vaesenc TMP3, CTR3, CTR3

    vpshufd $78, TMP5, TMP4
    vpxor TMP5, TMP4, TMP4

    vaesenc TMP3, CTR4, CTR4
    vaesenc TMP3, CTR5, CTR5
    vaesenc TMP3, CTR6, CTR6
    vaesenc TMP3, CTR7, CTR7

    vpclmulqdq $0x11, 7*16(Htbl), TMP5, TMP3
    vpxor TMP3, TMP1, TMP1
    vpclmulqdq $0x00, 7*16(Htbl), TMP5, TMP3
    vpxor TMP3, TMP2, TMP2
    vpclmulqdq $0x00, 128+7*16(Htbl), TMP4, TMP3
    vpxor TMP3, TMP0, TMP0

    ROUND 8
    vmovdqa .Lpoly(%rip), TMP5

    vpxor TMP1, TMP0, TMP0
    vpxor TMP2, TMP0, TMP0
    vpsrldq $8, TMP0, TMP3
    vpxor TMP3, TMP1, TMP4
    vpslldq $8, TMP0, TMP3
    vpxor TMP3, TMP2, T

    vpclmulqdq $0x10, TMP5, T, TMP1
    vpalignr $8, T, T, T
    vpxor T, TMP1, T

    ROUND 9

    vpclmulqdq $0x10, TMP5, T, TMP1
    vpalignr $8, T, T, T
    vpxor T, TMP1, T

    vmovdqu 160(KS), TMP5
    cmp $10, NR
    jbe .LLast2

    ROUND 10
    ROUND 11

    vmovdqu 192(KS), TMP5
    cmp $12, NR
    jbe .LLast2

    ROUND 12
    ROUND 13

    vmovdqu 224(KS), TMP5

.LLast2:

    vpxor (PT), TMP5, TMP3
    vaesenclast TMP3, CTR0, CTR0
    vpxor 16(PT), TMP5, TMP3
    vaesenclast TMP3, CTR1, CTR1
    vpxor 32(PT), TMP5, TMP3
    vaesenclast TMP3, CTR2, CTR2
    vpxor 48(PT), TMP5, TMP3
    vaesenclast TMP3, CTR3, CTR3
    vpxor 64(PT), TMP5, TMP3
    vaesenclast TMP3, CTR4, CTR4
    vpxor 80(PT), TMP5, TMP3
    vaesenclast TMP3, CTR5, CTR5
    vpxor 96(PT), TMP5, TMP3
    vaesenclast TMP3, CTR6, CTR6
    vpxor 112(PT), TMP5, TMP3
    vaesenclast TMP3, CTR7, CTR7

    vmovdqu .Lbswap_mask(%rip), TMP3

    vmovdqu CTR0, (CT)
    vpshufb TMP3, CTR0, CTR0
    vmovdqu CTR1, 16(CT)
    vpshufb TMP3, CTR1, CTR1
    vmovdqu CTR2, 32(CT)
    vpshufb TMP3, CTR2, CTR2
    vmovdqu CTR3, 48(CT)
    vpshufb TMP3, CTR3, CTR3
    vmovdqu CTR4, 64(CT)
    vpshufb TMP3, CTR4, CTR4
    vmovdqu CTR5, 80(CT)
    vpshufb TMP3, CTR5, CTR5
    vmovdqu CTR6, 96(CT)
    vpshufb TMP3, CTR6, CTR6
    vmovdqu CTR7, 112(CT)
    vpshufb TMP3, CTR7, CTR7

    vpxor TMP4, T, T

    lea 128(CT), CT
    lea 128(PT), PT
    jmp .LDataOctets

.LEndOctets:

    vmovdqa CTR7, TMP5
    vmovdqa CTR6, 1*16(%rsp)
    vmovdqa CTR5, 2*16(%rsp)
    vmovdqa CTR4, 3*16(%rsp)
    vmovdqa CTR3, 4*16(%rsp)
    vmovdqa CTR2, 5*16(%rsp)
    vmovdqa CTR1, 6*16(%rsp)
    vmovdqa CTR0, 7*16(%rsp)

    vmovdqu 16*0(Htbl), TMP3
    vpclmulqdq $0x11, TMP3, TMP5, TMP1
    vpclmulqdq $0x00, TMP3, TMP5, TMP2
    vpshufd $78, TMP5, TMP3
    vpxor TMP5, TMP3, TMP5
    vmovdqu 128+0*16(Htbl), TMP3
    vpclmulqdq $0x00, TMP3, TMP5, TMP0

    KARATSUBA 1
    KARATSUBA 2
    KARATSUBA 3
    KARATSUBA 4
    KARATSUBA 5
    KARATSUBA 6

    vmovdqu 7*16(%rsp), TMP5
    vpxor T, TMP5, TMP5
    vmovdqu 16*7(Htbl), TMP4
    vpclmulqdq $0x11, TMP4, TMP5, TMP3
    vpxor TMP3, TMP1, TMP1
    vpclmulqdq $0x00, TMP4, TMP5, TMP3
    vpxor TMP3, TMP2, TMP2
    vpshufd $78, TMP5, TMP3
    vpxor TMP5, TMP3, TMP5
    vmovdqu 128+7*16(Htbl), TMP4
    vpclmulqdq $0x00, TMP4, TMP5, TMP3
    vpxor TMP3, TMP0, TMP0

    vpxor TMP1, TMP0, TMP0
    vpxor TMP2, TMP0, TMP0

    vpsrldq $8, TMP0, TMP3
    vpxor TMP3, TMP1, TMP4
    vpslldq $8, TMP0, TMP3
    vpxor TMP3, TMP2, T

    vmovdqa .Lpoly(%rip), TMP2

    vpalignr $8, T, T, TMP1
    vpclmulqdq $0x10, TMP2, T, T
    vpxor T, TMP1, T

    vpalignr $8, T, T, TMP1
    vpclmulqdq $0x10, TMP2, T, T
    vpxor T, TMP1, T

    vpxor TMP4, T, T

# Here we encrypt any remaining whole blocks
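# Each remaining whole block is encrypted with a single-block CTR pass and folded into the
# hash immediately via GFMUL (no aggregation).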
.LDataSingles:

    cmp $16, len
    jb .LDataTail
    sub $16, len

    vpshufb .Lbswap_mask(%rip), CTR, TMP1
    vpaddd .Lone(%rip), CTR, CTR

    vpxor (KS), TMP1, TMP1
    vaesenc 16*1(KS), TMP1, TMP1
    vaesenc 16*2(KS), TMP1, TMP1
    vaesenc 16*3(KS), TMP1, TMP1
    vaesenc 16*4(KS), TMP1, TMP1
    vaesenc 16*5(KS), TMP1, TMP1
    vaesenc 16*6(KS), TMP1, TMP1
    vaesenc 16*7(KS), TMP1, TMP1
    vaesenc 16*8(KS), TMP1, TMP1
    vaesenc 16*9(KS), TMP1, TMP1
    vmovdqu 16*10(KS), TMP2
    cmp $10, NR
    je .LLast3
    vaesenc 16*10(KS), TMP1, TMP1
    vaesenc 16*11(KS), TMP1, TMP1
    vmovdqu 16*12(KS), TMP2
    cmp $12, NR
    je .LLast3
    vaesenc 16*12(KS), TMP1, TMP1
    vaesenc 16*13(KS), TMP1, TMP1
    vmovdqu 16*14(KS), TMP2

.LLast3:
    vaesenclast TMP2, TMP1, TMP1

    vpxor (PT), TMP1, TMP1
    vmovdqu TMP1, (CT)
    addq $16, CT
    addq $16, PT

    vpshufb .Lbswap_mask(%rip), TMP1, TMP1
    vpxor TMP1, T, T
    vmovdqu (Htbl), TMP0
    call GFMUL

    jmp .LDataSingles

# Here we encrypt the final partial block, if there is one
.LDataTail:

    test len, len
    jz DATA_END
    # First prepare the counter block
    vpshufb .Lbswap_mask(%rip), CTR, TMP1
    vpaddd .Lone(%rip), CTR, CTR

    vpxor (KS), TMP1, TMP1
    vaesenc 16*1(KS), TMP1, TMP1
    vaesenc 16*2(KS), TMP1, TMP1
    vaesenc 16*3(KS), TMP1, TMP1
    vaesenc 16*4(KS), TMP1, TMP1
    vaesenc 16*5(KS), TMP1, TMP1
    vaesenc 16*6(KS), TMP1, TMP1
    vaesenc 16*7(KS), TMP1, TMP1
    vaesenc 16*8(KS), TMP1, TMP1
    vaesenc 16*9(KS), TMP1, TMP1
    vmovdqu 16*10(KS), TMP2
    cmp $10, NR
    je .LLast4
    vaesenc 16*10(KS), TMP1, TMP1
    vaesenc 16*11(KS), TMP1, TMP1
    vmovdqu 16*12(KS), TMP2
    cmp $12, NR
    je .LLast4
    vaesenc 16*12(KS), TMP1, TMP1
    vaesenc 16*13(KS), TMP1, TMP1
    vmovdqu 16*14(KS), TMP2

.LLast4:
    vaesenclast TMP2, TMP1, TMP1
    # Zero a temp location
    vpxor TMP2, TMP2, TMP2
    vmovdqa TMP2, (%rsp)

    # Copy the required bytes only (could probably use rep movsb)
    xor KS, KS
.LEncCpy:
    cmp KS, len
    je .LEncCpyEnd
    movb (PT, KS, 1), %r8b
    movb %r8b, (%rsp, KS, 1)
    inc KS
    jmp .LEncCpy
.LEncCpyEnd:
    # Xor with the counter block
    vpxor (%rsp), TMP1, TMP0
    # Again, store at temp location
    vmovdqa TMP0, (%rsp)
    # Copy only the required bytes to CT, and zero the rest for the hash
    xor KS, KS
.LEncCpy2:
    cmp KS, len
    je .LEncCpy3
    movb (%rsp, KS, 1), %r8b
    movb %r8b, (CT, KS, 1)
    inc KS
    jmp .LEncCpy2
.LEncCpy3:
    cmp $16, KS
    je .LEndCpy3
    movb $0, (%rsp, KS, 1)
    inc KS
    jmp .LEncCpy3
.LEndCpy3:
    vmovdqa (%rsp), TMP0

    vpshufb .Lbswap_mask(%rip), TMP0, TMP0
    vpxor TMP0, T, T
    vmovdqu (Htbl), TMP0
    call GFMUL

DATA_END:

    vpshufb .Lbswap_mask(%rip), T, T
    vpshufb .Lbswap_mask(%rip), CTR, CTR
    vmovdqu T, 272(Gctx)
    vmovdqu CTR, 288(Gctx)

    movq %rbp, %rsp

    popq %rbx
    popq %rbp
    ret
.size intel_aes_gcmENC, .-intel_aes_gcmENC

#########################
# Decrypt and Authenticate
# void intel_aes_gcmDEC(uint8_t* PT, uint8_t* CT, void *Gctx, uint64_t len);
.type intel_aes_gcmDEC,@function
.globl intel_aes_gcmDEC
.align 16
intel_aes_gcmDEC:
    # parameter 1: CT   # input
    # parameter 2: PT   # output
    # parameter 3: %rdx # Gctx
    # parameter 4: %rcx # len

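# DEC_KARATSUBA i hashes the ciphertext block at (7-i)*16(CT) straight from the input buffer
# (for decryption the ciphertext itself is hashed, so no scratch copy is needed) and
# xor-accumulates into TMP0 (middle), TMP1 (high) and TMP2 (low), mirroring the KARATSUBA
# macro above.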
.macro DEC_KARATSUBA i
    vmovdqu (7-\i)*16(CT), TMP5
    vpshufb .Lbswap_mask(%rip), TMP5, TMP5

    vpclmulqdq $0x11, 16*\i(Htbl), TMP5, TMP3
    vpxor TMP3, TMP1, TMP1
    vpclmulqdq $0x00, 16*\i(Htbl), TMP5, TMP3
    vpxor TMP3, TMP2, TMP2
    vpshufd $78, TMP5, TMP3
    vpxor TMP5, TMP3, TMP5
    vpclmulqdq $0x00, 128+\i*16(Htbl), TMP5, TMP3
    vpxor TMP3, TMP0, TMP0
.endm

    .set PT, %rsi
    .set CT, %rdi
    .set Htbl, %rdx
    .set len, %rcx
    .set KS, %r9
    .set NR, %r10d

    .set Gctx, %rdx

    .set T, %xmm0
    .set TMP0, %xmm1
    .set TMP1, %xmm2
    .set TMP2, %xmm3
    .set TMP3, %xmm4
    .set TMP4, %xmm5
    .set TMP5, %xmm6
    .set CTR0, %xmm7
    .set CTR1, %xmm8
    .set CTR2, %xmm9
    .set CTR3, %xmm10
    .set CTR4, %xmm11
    .set CTR5, %xmm12
    .set CTR6, %xmm13
    .set CTR7, %xmm14
    .set CTR, %xmm15

    test len, len
    jnz .LbeginDec
    ret

.LbeginDec:

    pushq %rbp
    pushq %rbx
    movq %rsp, %rbp
    sub $128, %rsp
    andq $-16, %rsp
    vmovdqu 288(Gctx), CTR
    vmovdqu 272(Gctx), T
    mov 304(Gctx), KS
    mov 4(KS), NR
    lea 48(KS), KS

    vpshufb .Lbswap_mask(%rip), CTR, CTR
    vpshufb .Lbswap_mask(%rip), T, T

    vmovdqu .Lbswap_mask(%rip), TMP3
    jmp .LDECOctets

# Decrypt 8 blocks each time while hashing them at the same time
.align 64
.LDECOctets:

    cmp $128, len
    jb .LDECSingles
    sub $128, len

    vmovdqa CTR, CTR0
    vpaddd .Lone(%rip), CTR0, CTR1
    vpaddd .Ltwo(%rip), CTR0, CTR2
    vpaddd .Lone(%rip), CTR2, CTR3
    vpaddd .Ltwo(%rip), CTR2, CTR4
    vpaddd .Lone(%rip), CTR4, CTR5
    vpaddd .Ltwo(%rip), CTR4, CTR6
    vpaddd .Lone(%rip), CTR6, CTR7
    vpaddd .Ltwo(%rip), CTR6, CTR

    vpshufb TMP3, CTR0, CTR0
    vpshufb TMP3, CTR1, CTR1
    vpshufb TMP3, CTR2, CTR2
    vpshufb TMP3, CTR3, CTR3
    vpshufb TMP3, CTR4, CTR4
    vpshufb TMP3, CTR5, CTR5
    vpshufb TMP3, CTR6, CTR6
    vpshufb TMP3, CTR7, CTR7

    vmovdqu (KS), TMP3
    vpxor TMP3, CTR0, CTR0
    vpxor TMP3, CTR1, CTR1
    vpxor TMP3, CTR2, CTR2
    vpxor TMP3, CTR3, CTR3
    vpxor TMP3, CTR4, CTR4
    vpxor TMP3, CTR5, CTR5
    vpxor TMP3, CTR6, CTR6
    vpxor TMP3, CTR7, CTR7

    vmovdqu 7*16(CT), TMP5
    vpshufb .Lbswap_mask(%rip), TMP5, TMP5
    vmovdqu 16*0(Htbl), TMP3
    vpclmulqdq $0x11, TMP3, TMP5, TMP1
    vpclmulqdq $0x00, TMP3, TMP5, TMP2
    vpshufd $78, TMP5, TMP3
    vpxor TMP5, TMP3, TMP5
    vmovdqu 128+0*16(Htbl), TMP3
    vpclmulqdq $0x00, TMP3, TMP5, TMP0

    ROUND 1
    DEC_KARATSUBA 1

    ROUND 2
    DEC_KARATSUBA 2

    ROUND 3
    DEC_KARATSUBA 3

    ROUND 4
    DEC_KARATSUBA 4

    ROUND 5
    DEC_KARATSUBA 5

    ROUND 6
    DEC_KARATSUBA 6

    ROUND 7

    vmovdqu 0*16(CT), TMP5
    vpshufb .Lbswap_mask(%rip), TMP5, TMP5
    vpxor T, TMP5, TMP5
    vmovdqu 16*7(Htbl), TMP4

    vpclmulqdq $0x11, TMP4, TMP5, TMP3
    vpxor TMP3, TMP1, TMP1
    vpclmulqdq $0x00, TMP4, TMP5, TMP3
    vpxor TMP3, TMP2, TMP2

    vpshufd $78, TMP5, TMP3
    vpxor TMP5, TMP3, TMP5
    vmovdqu 128+7*16(Htbl), TMP4

    vpclmulqdq $0x00, TMP4, TMP5, TMP3
    vpxor TMP3, TMP0, TMP0

    ROUND 8

    vpxor TMP1, TMP0, TMP0
    vpxor TMP2, TMP0, TMP0

    vpsrldq $8, TMP0, TMP3
    vpxor TMP3, TMP1, TMP4
    vpslldq $8, TMP0, TMP3
    vpxor TMP3, TMP2, T
    vmovdqa .Lpoly(%rip), TMP2

    vpalignr $8, T, T, TMP1
    vpclmulqdq $0x10, TMP2, T, T
    vpxor T, TMP1, T

    ROUND 9

    vpalignr $8, T, T, TMP1
    vpclmulqdq $0x10, TMP2, T, T
    vpxor T, TMP1, T

    vmovdqu 160(KS), TMP5
    cmp $10, NR

    jbe .LDECLast1

    ROUND 10
    ROUND 11

    vmovdqu 192(KS), TMP5
    cmp $12, NR

    jbe .LDECLast1

    ROUND 12
    ROUND 13

    vmovdqu 224(KS), TMP5

.LDECLast1:

    vpxor (CT), TMP5, TMP3
    vaesenclast TMP3, CTR0, CTR0
    vpxor 16(CT), TMP5, TMP3
    vaesenclast TMP3, CTR1, CTR1
    vpxor 32(CT), TMP5, TMP3
    vaesenclast TMP3, CTR2, CTR2
    vpxor 48(CT), TMP5, TMP3
    vaesenclast TMP3, CTR3, CTR3
    vpxor 64(CT), TMP5, TMP3
    vaesenclast TMP3, CTR4, CTR4
    vpxor 80(CT), TMP5, TMP3
    vaesenclast TMP3, CTR5, CTR5
    vpxor 96(CT), TMP5, TMP3
    vaesenclast TMP3, CTR6, CTR6
    vpxor 112(CT), TMP5, TMP3
    vaesenclast TMP3, CTR7, CTR7

    vmovdqu .Lbswap_mask(%rip), TMP3

    vmovdqu CTR0, (PT)
    vmovdqu CTR1, 16(PT)
    vmovdqu CTR2, 32(PT)
    vmovdqu CTR3, 48(PT)
    vmovdqu CTR4, 64(PT)
    vmovdqu CTR5, 80(PT)
    vmovdqu CTR6, 96(PT)
    vmovdqu CTR7, 112(PT)

    vpxor TMP4, T, T

    lea 128(CT), CT
    lea 128(PT), PT
    jmp .LDECOctets

# Here we decrypt and hash any remaining whole blocks
.LDECSingles:

    cmp $16, len
    jb .LDECTail
    sub $16, len

    vmovdqu (CT), TMP1
    vpshufb .Lbswap_mask(%rip), TMP1, TMP1
    vpxor TMP1, T, T
    vmovdqu (Htbl), TMP0
    call GFMUL

    vpshufb .Lbswap_mask(%rip), CTR, TMP1
    vpaddd .Lone(%rip), CTR, CTR

    vpxor (KS), TMP1, TMP1
    vaesenc 16*1(KS), TMP1, TMP1
    vaesenc 16*2(KS), TMP1, TMP1
    vaesenc 16*3(KS), TMP1, TMP1
    vaesenc 16*4(KS), TMP1, TMP1
    vaesenc 16*5(KS), TMP1, TMP1
    vaesenc 16*6(KS), TMP1, TMP1
    vaesenc 16*7(KS), TMP1, TMP1
    vaesenc 16*8(KS), TMP1, TMP1
    vaesenc 16*9(KS), TMP1, TMP1
    vmovdqu 16*10(KS), TMP2
    cmp $10, NR
    je .LDECLast2
    vaesenc 16*10(KS), TMP1, TMP1
    vaesenc 16*11(KS), TMP1, TMP1
    vmovdqu 16*12(KS), TMP2
    cmp $12, NR
    je .LDECLast2
    vaesenc 16*12(KS), TMP1, TMP1
    vaesenc 16*13(KS), TMP1, TMP1
    vmovdqu 16*14(KS), TMP2
.LDECLast2:
    vaesenclast TMP2, TMP1, TMP1

    vpxor (CT), TMP1, TMP1
    vmovdqu TMP1, (PT)
    addq $16, CT
    addq $16, PT
    jmp .LDECSingles

# Here we decrypt the final partial block, if there is one
.LDECTail:
    test len, len
    jz .LDEC_END

    vpshufb .Lbswap_mask(%rip), CTR, TMP1
    vpaddd .Lone(%rip), CTR, CTR

    vpxor (KS), TMP1, TMP1
    vaesenc 16*1(KS), TMP1, TMP1
    vaesenc 16*2(KS), TMP1, TMP1
    vaesenc 16*3(KS), TMP1, TMP1
    vaesenc 16*4(KS), TMP1, TMP1
    vaesenc 16*5(KS), TMP1, TMP1
    vaesenc 16*6(KS), TMP1, TMP1
    vaesenc 16*7(KS), TMP1, TMP1
    vaesenc 16*8(KS), TMP1, TMP1
    vaesenc 16*9(KS), TMP1, TMP1
    vmovdqu 16*10(KS), TMP2
    cmp $10, NR
    je .LDECLast3
    vaesenc 16*10(KS), TMP1, TMP1
    vaesenc 16*11(KS), TMP1, TMP1
    vmovdqu 16*12(KS), TMP2
    cmp $12, NR
    je .LDECLast3
    vaesenc 16*12(KS), TMP1, TMP1
    vaesenc 16*13(KS), TMP1, TMP1
    vmovdqu 16*14(KS), TMP2

.LDECLast3:
    vaesenclast TMP2, TMP1, TMP1

    vpxor TMP2, TMP2, TMP2
    vmovdqa TMP2, (%rsp)
    # Copy the required bytes only (could probably use rep movsb)
    xor KS, KS
.LDecCpy:
    cmp KS, len
    je .LDecCpy2
    movb (CT, KS, 1), %r8b
    movb %r8b, (%rsp, KS, 1)
    inc KS
    jmp .LDecCpy
.LDecCpy2:
    cmp $16, KS
    je .LDecCpyEnd
    movb $0, (%rsp, KS, 1)
    inc KS
    jmp .LDecCpy2
.LDecCpyEnd:
    # Xor with the counter block
    vmovdqa (%rsp), TMP0
    vpxor TMP0, TMP1, TMP1
    # Again, store at temp location
    vmovdqa TMP1, (%rsp)
    # Copy only the required bytes to PT, and zero the rest for the hash
    xor KS, KS
.LDecCpy3:
    cmp KS, len
    je .LDecCpyEnd3
    movb (%rsp, KS, 1), %r8b
    movb %r8b, (PT, KS, 1)
    inc KS
    jmp .LDecCpy3
.LDecCpyEnd3:
    vpshufb .Lbswap_mask(%rip), TMP0, TMP0
    vpxor TMP0, T, T
    vmovdqu (Htbl), TMP0
    call GFMUL
.LDEC_END:

    vpshufb .Lbswap_mask(%rip), T, T
    vpshufb .Lbswap_mask(%rip), CTR, CTR
    vmovdqu T, 272(Gctx)
    vmovdqu CTR, 288(Gctx)

    movq %rbp, %rsp

    popq %rbx
    popq %rbp
    ret
.size intel_aes_gcmDEC, .-intel_aes_gcmDEC
#########################
# a   = T
# b   = TMP0 - remains unchanged
# res = T
# uses also TMP1, TMP2, TMP3, TMP4
# __m128i GFMUL(__m128i A, __m128i B);
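# A single GF(2^128) multiply for GHASH: a Karatsuba carry-less multiply (three PCLMULQDQs)
# followed by a two-step folding reduction with .Lpoly. The product is left in T.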
.type GFMUL,@function
.globl GFMUL
GFMUL:
    vpclmulqdq $0x00, TMP0, T, TMP1
    vpclmulqdq $0x11, TMP0, T, TMP4

    vpshufd $78, T, TMP2
    vpshufd $78, TMP0, TMP3
    vpxor T, TMP2, TMP2
    vpxor TMP0, TMP3, TMP3

    vpclmulqdq $0x00, TMP3, TMP2, TMP2
    vpxor TMP1, TMP2, TMP2
    vpxor TMP4, TMP2, TMP2

    vpslldq $8, TMP2, TMP3
    vpsrldq $8, TMP2, TMP2

    vpxor TMP3, TMP1, TMP1
    vpxor TMP2, TMP4, TMP4

    vpclmulqdq $0x10, .Lpoly(%rip), TMP1, TMP2
    vpshufd $78, TMP1, TMP3
    vpxor TMP3, TMP2, TMP1

    vpclmulqdq $0x10, .Lpoly(%rip), TMP1, TMP2
    vpshufd $78, TMP1, TMP3
    vpxor TMP3, TMP2, TMP1

    vpxor TMP4, TMP1, T
    ret
.size GFMUL, .-GFMUL