media/libvpx/third_party/x86inc/x86inc.asm

changeset 0:6474c204b198
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/media/libvpx/third_party/x86inc/x86inc.asm	Wed Dec 31 06:09:35 2014 +0100
     1.3 @@ -0,0 +1,1199 @@
     1.4 +;*****************************************************************************
     1.5 +;* x86inc.asm: x264asm abstraction layer
     1.6 +;*****************************************************************************
     1.7 +;* Copyright (C) 2005-2012 x264 project
     1.8 +;*
     1.9 +;* Authors: Loren Merritt <lorenm@u.washington.edu>
    1.10 +;*          Anton Mitrofanov <BugMaster@narod.ru>
    1.11 +;*          Jason Garrett-Glaser <darkshikari@gmail.com>
    1.12 +;*          Henrik Gramner <hengar-6@student.ltu.se>
    1.13 +;*
    1.14 +;* Permission to use, copy, modify, and/or distribute this software for any
    1.15 +;* purpose with or without fee is hereby granted, provided that the above
    1.16 +;* copyright notice and this permission notice appear in all copies.
    1.17 +;*
    1.18 +;* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    1.19 +;* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    1.20 +;* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    1.21 +;* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    1.22 +;* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    1.23 +;* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    1.24 +;* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
    1.25 +;*****************************************************************************
    1.26 +
    1.27 +; This is a header file for the x264ASM assembly language, which uses
    1.28 +; NASM/YASM syntax combined with a large number of macros to provide easy
    1.29 +; abstraction between different calling conventions (x86_32, win64, linux64).
    1.30 +; It also has various other useful features to simplify writing the kind of
    1.31 +; DSP functions that are most often used in x264.
    1.32 +
    1.33 +; Unlike the rest of x264, this file is available under an ISC license, as it
    1.34 +; has significant usefulness outside of x264 and we want it to be available
    1.35 +; to the largest audience possible.  Of course, if you modify it for your own
    1.36 +; purposes to add a new feature, we strongly encourage contributing a patch
    1.37 +; as this feature might be useful for others as well.  Send patches or ideas
    1.38 +; to x264-devel@videolan.org .
    1.39 +
    1.40 +%include "vpx_config.asm"
    1.41 +
    1.42 +%define program_name vp9
    1.43 +
    1.44 +
    1.45 +%define UNIX64 0
    1.46 +%define WIN64  0
    1.47 +%if ARCH_X86_64
    1.48 +    %ifidn __OUTPUT_FORMAT__,win32
    1.49 +        %define WIN64  1
    1.50 +    %elifidn __OUTPUT_FORMAT__,win64
    1.51 +        %define WIN64  1
    1.52 +    %elifidn __OUTPUT_FORMAT__,x64
    1.53 +        %define WIN64  1
    1.54 +    %else
    1.55 +        %define UNIX64 1
    1.56 +    %endif
    1.57 +%endif
    1.58 +
    1.59 +%ifidn   __OUTPUT_FORMAT__,elf32
    1.60 +    %define mangle(x) x
    1.61 +%elifidn __OUTPUT_FORMAT__,elf64
    1.62 +    %define mangle(x) x
    1.63 +%elifidn __OUTPUT_FORMAT__,elf
    1.64 +    %define mangle(x) x
    1.65 +%elifidn __OUTPUT_FORMAT__,x64
    1.66 +    %define mangle(x) x
    1.67 +%elifidn __OUTPUT_FORMAT__,win64
    1.68 +    %define mangle(x) x
    1.69 +%else
    1.70 +    %define mangle(x) _ %+ x
    1.71 +%endif
    1.72 +
    1.73 +; FIXME: All of the 64bit asm functions that take a stride as an argument
     1.74 +; via a register assume that the high dword of that register is filled with 0.
    1.75 +; This is true in practice (since we never do any 64bit arithmetic on strides,
    1.76 +; and x264's strides are all positive), but is not guaranteed by the ABI.
    1.77 +
    1.78 +; Name of the .rodata section.
    1.79 +; Kludge: Something on OS X fails to align .rodata even given an align attribute,
    1.80 +; so use a different read-only section.
    1.81 +%macro SECTION_RODATA 0-1 16
    1.82 +    %ifidn __OUTPUT_FORMAT__,macho64
    1.83 +        SECTION .text align=%1
    1.84 +    %elifidn __OUTPUT_FORMAT__,macho
    1.85 +        SECTION .text align=%1
    1.86 +        fakegot:
    1.87 +    %elifidn __OUTPUT_FORMAT__,aout
    1.88 +        section .text
    1.89 +    %else
    1.90 +        SECTION .rodata align=%1
    1.91 +    %endif
    1.92 +%endmacro
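          +
          +; Illustrative use (an editorial example, not part of the original header):
          +; request 32-byte alignment and declare a constant in the read-only section:
          +;     SECTION_RODATA 32
          +;     pw_64: times 16 dw 64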
    1.93 +
    1.94 +; aout does not support align=
    1.95 +%macro SECTION_TEXT 0-1 16
    1.96 +    %ifidn __OUTPUT_FORMAT__,aout
    1.97 +        SECTION .text
    1.98 +    %else
    1.99 +        SECTION .text align=%1
   1.100 +    %endif
   1.101 +%endmacro
   1.102 +
   1.103 +; PIC macros are copied from vpx_ports/x86_abi_support.asm. The "define PIC"
    1.104 +; from the original code is added in for 64bit.
   1.105 +%ifidn __OUTPUT_FORMAT__,elf32
   1.106 +%define ABI_IS_32BIT 1
   1.107 +%elifidn __OUTPUT_FORMAT__,macho32
   1.108 +%define ABI_IS_32BIT 1
   1.109 +%elifidn __OUTPUT_FORMAT__,win32
   1.110 +%define ABI_IS_32BIT 1
   1.111 +%elifidn __OUTPUT_FORMAT__,aout
   1.112 +%define ABI_IS_32BIT 1
   1.113 +%else
   1.114 +%define ABI_IS_32BIT 0
   1.115 +%endif
   1.116 +
   1.117 +%if ABI_IS_32BIT
   1.118 +  %if CONFIG_PIC=1
   1.119 +  %ifidn __OUTPUT_FORMAT__,elf32
   1.120 +    %define GET_GOT_SAVE_ARG 1
   1.121 +    %define WRT_PLT wrt ..plt
   1.122 +    %macro GET_GOT 1
   1.123 +      extern _GLOBAL_OFFSET_TABLE_
   1.124 +      push %1
   1.125 +      call %%get_got
   1.126 +      %%sub_offset:
   1.127 +      jmp %%exitGG
   1.128 +      %%get_got:
   1.129 +      mov %1, [esp]
   1.130 +      add %1, _GLOBAL_OFFSET_TABLE_ + $$ - %%sub_offset wrt ..gotpc
   1.131 +      ret
   1.132 +      %%exitGG:
   1.133 +      %undef GLOBAL
   1.134 +      %define GLOBAL(x) x + %1 wrt ..gotoff
   1.135 +      %undef RESTORE_GOT
   1.136 +      %define RESTORE_GOT pop %1
   1.137 +    %endmacro
   1.138 +  %elifidn __OUTPUT_FORMAT__,macho32
   1.139 +    %define GET_GOT_SAVE_ARG 1
   1.140 +    %macro GET_GOT 1
   1.141 +      push %1
   1.142 +      call %%get_got
   1.143 +      %%get_got:
   1.144 +      pop  %1
   1.145 +      %undef GLOBAL
   1.146 +      %define GLOBAL(x) x + %1 - %%get_got
   1.147 +      %undef RESTORE_GOT
   1.148 +      %define RESTORE_GOT pop %1
   1.149 +    %endmacro
   1.150 +  %endif
   1.151 +  %endif
   1.152 +
   1.153 +  %if ARCH_X86_64 == 0
   1.154 +    %undef PIC
   1.155 +  %endif
   1.156 +
   1.157 +%else
   1.158 +  %macro GET_GOT 1
   1.159 +  %endmacro
   1.160 +  %define GLOBAL(x) rel x
   1.161 +  %define WRT_PLT wrt ..plt
   1.162 +
   1.163 +  %if WIN64
   1.164 +    %define PIC
   1.165 +  %elifidn __OUTPUT_FORMAT__,macho64
   1.166 +    %define PIC
   1.167 +  %elif CONFIG_PIC
   1.168 +    %define PIC
   1.169 +  %endif
   1.170 +%endif
   1.171 +
   1.172 +%ifnmacro GET_GOT
   1.173 +    %macro GET_GOT 1
   1.174 +    %endmacro
   1.175 +    %define GLOBAL(x) x
   1.176 +%endif
   1.177 +%ifndef RESTORE_GOT
   1.178 +%define RESTORE_GOT
   1.179 +%endif
   1.180 +%ifndef WRT_PLT
   1.181 +%define WRT_PLT
   1.182 +%endif
   1.183 +
   1.184 +%ifdef PIC
   1.185 +    default rel
   1.186 +%endif
   1.187 +; Done with PIC macros
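          +
          +; Illustrative sketch of the helpers above (the table name is hypothetical):
          +;     GET_GOT r3                ; no-op on 64-bit; sets up a GOT base on 32-bit PIC
          +;     movq mm0, [GLOBAL(tbl)]   ; GLOBAL() yields rel/GOT-relative addressing
          +;     RESTORE_GOT               ; pops the register GET_GOT saved, if any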
   1.188 +
   1.189 +; Always use long nops (reduces 0x90 spam in disassembly on x86_32)
   1.190 +%ifndef __NASM_VER__
   1.191 +CPU amdnop
   1.192 +%else
   1.193 +%use smartalign
   1.194 +ALIGNMODE k7
   1.195 +%endif
   1.196 +
   1.197 +; Macros to eliminate most code duplication between x86_32 and x86_64:
   1.198 +; Currently this works only for leaf functions which load all their arguments
   1.199 +; into registers at the start, and make no other use of the stack. Luckily that
   1.200 +; covers most of x264's asm.
   1.201 +
   1.202 +; PROLOGUE:
   1.203 +; %1 = number of arguments. loads them from stack if needed.
   1.204 +; %2 = number of registers used. pushes callee-saved regs if needed.
   1.205 +; %3 = number of xmm registers used. pushes callee-saved xmm regs if needed.
   1.206 +; %4 = list of names to define to registers
   1.207 +; PROLOGUE can also be invoked by adding the same options to cglobal
   1.208 +
   1.209 +; e.g.
   1.210 +; cglobal foo, 2,3,0, dst, src, tmp
   1.211 +; declares a function (foo), taking two args (dst and src) and one local variable (tmp)
   1.212 +
   1.213 +; TODO Some functions can use some args directly from the stack. If they're the
   1.214 +; last args then you can just not declare them, but if they're in the middle
    1.215 +; we need a more flexible macro.
   1.216 +
   1.217 +; RET:
   1.218 +; Pops anything that was pushed by PROLOGUE, and returns.
   1.219 +
   1.220 +; REP_RET:
    1.221 +; Same, but if it doesn't pop anything it becomes a 2-byte ret, for Athlons,
    1.222 +; which are slow when a normal ret follows a branch.
   1.223 +
   1.224 +; registers:
   1.225 +; rN and rNq are the native-size register holding function argument N
   1.226 +; rNd, rNw, rNb are dword, word, and byte size
   1.227 +; rNm is the original location of arg N (a register or on the stack), dword
   1.228 +; rNmp is native size
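          +;
          +; An end-to-end sketch (the function and argument names are made up):
          +;     cglobal copy_row, 3,4,2, dst, src, w, tmp
          +;         mov   tmpd, wd        ; named aliases: dstq/srcq/wd/tmpd
          +;     .loop:
          +;         movu  m0, [srcq]
          +;         mova  [dstq], m0
          +;         add   srcq, mmsize
          +;         add   dstq, mmsize
          +;         sub   tmpd, mmsize
          +;         jg    .loop
          +;         RET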
   1.229 +
   1.230 +%macro DECLARE_REG 5-6
   1.231 +    %define r%1q %2
   1.232 +    %define r%1d %3
   1.233 +    %define r%1w %4
   1.234 +    %define r%1b %5
   1.235 +    %if %0 == 5
   1.236 +        %define r%1m  %3
   1.237 +        %define r%1mp %2
   1.238 +    %elif ARCH_X86_64 ; memory
   1.239 +        %define r%1m [rsp + stack_offset + %6]
   1.240 +        %define r%1mp qword r %+ %1m
   1.241 +    %else
   1.242 +        %define r%1m [esp + stack_offset + %6]
   1.243 +        %define r%1mp dword r %+ %1m
   1.244 +    %endif
   1.245 +    %define r%1  %2
   1.246 +%endmacro
   1.247 +
   1.248 +%macro DECLARE_REG_SIZE 2
   1.249 +    %define r%1q r%1
   1.250 +    %define e%1q r%1
   1.251 +    %define r%1d e%1
   1.252 +    %define e%1d e%1
   1.253 +    %define r%1w %1
   1.254 +    %define e%1w %1
   1.255 +    %define r%1b %2
   1.256 +    %define e%1b %2
   1.257 +%if ARCH_X86_64 == 0
   1.258 +    %define r%1  e%1
   1.259 +%endif
   1.260 +%endmacro
   1.261 +
   1.262 +DECLARE_REG_SIZE ax, al
   1.263 +DECLARE_REG_SIZE bx, bl
   1.264 +DECLARE_REG_SIZE cx, cl
   1.265 +DECLARE_REG_SIZE dx, dl
   1.266 +DECLARE_REG_SIZE si, sil
   1.267 +DECLARE_REG_SIZE di, dil
   1.268 +DECLARE_REG_SIZE bp, bpl
   1.269 +
   1.270 +; t# defines for when per-arch register allocation is more complex than just function arguments
   1.271 +
   1.272 +%macro DECLARE_REG_TMP 1-*
   1.273 +    %assign %%i 0
   1.274 +    %rep %0
   1.275 +        CAT_XDEFINE t, %%i, r%1
   1.276 +        %assign %%i %%i+1
   1.277 +        %rotate 1
   1.278 +    %endrep
   1.279 +%endmacro
   1.280 +
   1.281 +%macro DECLARE_REG_TMP_SIZE 0-*
   1.282 +    %rep %0
   1.283 +        %define t%1q t%1 %+ q
   1.284 +        %define t%1d t%1 %+ d
   1.285 +        %define t%1w t%1 %+ w
   1.286 +        %define t%1b t%1 %+ b
   1.287 +        %rotate 1
   1.288 +    %endrep
   1.289 +%endmacro
   1.290 +
   1.291 +DECLARE_REG_TMP_SIZE 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14
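          +
          +; Illustrative per-arch allocation (the register choices are hypothetical):
          +;     %if ARCH_X86_64
          +;         DECLARE_REG_TMP 4, 5  ; t0/t1 -> r4/r5
          +;     %else
          +;         DECLARE_REG_TMP 2, 3  ; t0/t1 -> r2/r3
          +;     %endif
          +;     mov t0d, 1                ; t0d/t0w/t0b come from DECLARE_REG_TMP_SIZE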
   1.292 +
   1.293 +%if ARCH_X86_64
   1.294 +    %define gprsize 8
   1.295 +%else
   1.296 +    %define gprsize 4
   1.297 +%endif
   1.298 +
   1.299 +%macro PUSH 1
   1.300 +    push %1
   1.301 +    %assign stack_offset stack_offset+gprsize
   1.302 +%endmacro
   1.303 +
   1.304 +%macro POP 1
   1.305 +    pop %1
   1.306 +    %assign stack_offset stack_offset-gprsize
   1.307 +%endmacro
   1.308 +
   1.309 +%macro PUSH_IF_USED 1-*
   1.310 +    %rep %0
   1.311 +        %if %1 < regs_used
   1.312 +            PUSH r%1
   1.313 +        %endif
   1.314 +        %rotate 1
   1.315 +    %endrep
   1.316 +%endmacro
   1.317 +
   1.318 +%macro POP_IF_USED 1-*
   1.319 +    %rep %0
   1.320 +        %if %1 < regs_used
   1.321 +            pop r%1
   1.322 +        %endif
   1.323 +        %rotate 1
   1.324 +    %endrep
   1.325 +%endmacro
   1.326 +
   1.327 +%macro LOAD_IF_USED 1-*
   1.328 +    %rep %0
   1.329 +        %if %1 < num_args
   1.330 +            mov r%1, r %+ %1 %+ mp
   1.331 +        %endif
   1.332 +        %rotate 1
   1.333 +    %endrep
   1.334 +%endmacro
   1.335 +
   1.336 +%macro SUB 2
   1.337 +    sub %1, %2
   1.338 +    %ifidn %1, rsp
   1.339 +        %assign stack_offset stack_offset+(%2)
   1.340 +    %endif
   1.341 +%endmacro
   1.342 +
   1.343 +%macro ADD 2
   1.344 +    add %1, %2
   1.345 +    %ifidn %1, rsp
   1.346 +        %assign stack_offset stack_offset-(%2)
   1.347 +    %endif
   1.348 +%endmacro
   1.349 +
   1.350 +%macro movifnidn 2
   1.351 +    %ifnidn %1, %2
   1.352 +        mov %1, %2
   1.353 +    %endif
   1.354 +%endmacro
   1.355 +
   1.356 +%macro movsxdifnidn 2
   1.357 +    %ifnidn %1, %2
   1.358 +        movsxd %1, %2
   1.359 +    %endif
   1.360 +%endmacro
   1.361 +
   1.362 +%macro ASSERT 1
   1.363 +    %if (%1) == 0
   1.364 +        %error assert failed
   1.365 +    %endif
   1.366 +%endmacro
   1.367 +
   1.368 +%macro DEFINE_ARGS 0-*
   1.369 +    %ifdef n_arg_names
   1.370 +        %assign %%i 0
   1.371 +        %rep n_arg_names
   1.372 +            CAT_UNDEF arg_name %+ %%i, q
   1.373 +            CAT_UNDEF arg_name %+ %%i, d
   1.374 +            CAT_UNDEF arg_name %+ %%i, w
   1.375 +            CAT_UNDEF arg_name %+ %%i, b
   1.376 +            CAT_UNDEF arg_name %+ %%i, m
   1.377 +            CAT_UNDEF arg_name %+ %%i, mp
   1.378 +            CAT_UNDEF arg_name, %%i
   1.379 +            %assign %%i %%i+1
   1.380 +        %endrep
   1.381 +    %endif
   1.382 +
   1.383 +    %xdefine %%stack_offset stack_offset
   1.384 +    %undef stack_offset ; so that the current value of stack_offset doesn't get baked in by xdefine
   1.385 +    %assign %%i 0
   1.386 +    %rep %0
   1.387 +        %xdefine %1q r %+ %%i %+ q
   1.388 +        %xdefine %1d r %+ %%i %+ d
   1.389 +        %xdefine %1w r %+ %%i %+ w
   1.390 +        %xdefine %1b r %+ %%i %+ b
   1.391 +        %xdefine %1m r %+ %%i %+ m
   1.392 +        %xdefine %1mp r %+ %%i %+ mp
   1.393 +        CAT_XDEFINE arg_name, %%i, %1
   1.394 +        %assign %%i %%i+1
   1.395 +        %rotate 1
   1.396 +    %endrep
   1.397 +    %xdefine stack_offset %%stack_offset
   1.398 +    %assign n_arg_names %0
   1.399 +%endmacro
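          +
          +; Illustrative renaming mid-function (the names are made up):
          +;     DEFINE_ARGS height, width, stride
          +; now heightq/heightd alias r0, widthq/widthd alias r1, strideq aliases r2,
          +; and any previously defined argument names are undefined first.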
   1.400 +
   1.401 +%if WIN64 ; Windows x64 ;=================================================
   1.402 +
   1.403 +DECLARE_REG 0,  rcx, ecx,  cx,   cl
   1.404 +DECLARE_REG 1,  rdx, edx,  dx,   dl
   1.405 +DECLARE_REG 2,  R8,  R8D,  R8W,  R8B
   1.406 +DECLARE_REG 3,  R9,  R9D,  R9W,  R9B
   1.407 +DECLARE_REG 4,  R10, R10D, R10W, R10B, 40
   1.408 +DECLARE_REG 5,  R11, R11D, R11W, R11B, 48
   1.409 +DECLARE_REG 6,  rax, eax,  ax,   al,   56
   1.410 +DECLARE_REG 7,  rdi, edi,  di,   dil,  64
   1.411 +DECLARE_REG 8,  rsi, esi,  si,   sil,  72
   1.412 +DECLARE_REG 9,  rbx, ebx,  bx,   bl,   80
   1.413 +DECLARE_REG 10, rbp, ebp,  bp,   bpl,  88
   1.414 +DECLARE_REG 11, R12, R12D, R12W, R12B, 96
   1.415 +DECLARE_REG 12, R13, R13D, R13W, R13B, 104
   1.416 +DECLARE_REG 13, R14, R14D, R14W, R14B, 112
   1.417 +DECLARE_REG 14, R15, R15D, R15W, R15B, 120
   1.418 +
   1.419 +%macro PROLOGUE 2-4+ 0 ; #args, #regs, #xmm_regs, arg_names...
   1.420 +    %assign num_args %1
   1.421 +    %assign regs_used %2
   1.422 +    ASSERT regs_used >= num_args
   1.423 +    ASSERT regs_used <= 15
   1.424 +    PUSH_IF_USED 7, 8, 9, 10, 11, 12, 13, 14
   1.425 +    %if mmsize == 8
   1.426 +        %assign xmm_regs_used 0
   1.427 +    %else
   1.428 +        WIN64_SPILL_XMM %3
   1.429 +    %endif
   1.430 +    LOAD_IF_USED 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
   1.431 +    DEFINE_ARGS %4
   1.432 +%endmacro
   1.433 +
   1.434 +%macro WIN64_SPILL_XMM 1
   1.435 +    %assign xmm_regs_used %1
   1.436 +    ASSERT xmm_regs_used <= 16
   1.437 +    %if xmm_regs_used > 6
   1.438 +        SUB rsp, (xmm_regs_used-6)*16+16
   1.439 +        %assign %%i xmm_regs_used
   1.440 +        %rep (xmm_regs_used-6)
   1.441 +            %assign %%i %%i-1
   1.442 +            movdqa [rsp + (%%i-6)*16+(~stack_offset&8)], xmm %+ %%i
   1.443 +        %endrep
   1.444 +    %endif
   1.445 +%endmacro
   1.446 +
   1.447 +%macro WIN64_RESTORE_XMM_INTERNAL 1
   1.448 +    %if xmm_regs_used > 6
   1.449 +        %assign %%i xmm_regs_used
   1.450 +        %rep (xmm_regs_used-6)
   1.451 +            %assign %%i %%i-1
   1.452 +            movdqa xmm %+ %%i, [%1 + (%%i-6)*16+(~stack_offset&8)]
   1.453 +        %endrep
   1.454 +        add %1, (xmm_regs_used-6)*16+16
   1.455 +    %endif
   1.456 +%endmacro
   1.457 +
   1.458 +%macro WIN64_RESTORE_XMM 1
   1.459 +    WIN64_RESTORE_XMM_INTERNAL %1
    1.460 +    %assign stack_offset stack_offset-((xmm_regs_used-6)*16+16)
   1.461 +    %assign xmm_regs_used 0
   1.462 +%endmacro
   1.463 +
   1.464 +%macro RET 0
   1.465 +    WIN64_RESTORE_XMM_INTERNAL rsp
   1.466 +    POP_IF_USED 14, 13, 12, 11, 10, 9, 8, 7
   1.467 +    ret
   1.468 +%endmacro
   1.469 +
   1.470 +%macro REP_RET 0
   1.471 +    %if regs_used > 7 || xmm_regs_used > 6
   1.472 +        RET
   1.473 +    %else
   1.474 +        rep ret
   1.475 +    %endif
   1.476 +%endmacro
   1.477 +
   1.478 +%elif ARCH_X86_64 ; *nix x64 ;=============================================
   1.479 +
   1.480 +DECLARE_REG 0,  rdi, edi,  di,   dil
   1.481 +DECLARE_REG 1,  rsi, esi,  si,   sil
   1.482 +DECLARE_REG 2,  rdx, edx,  dx,   dl
   1.483 +DECLARE_REG 3,  rcx, ecx,  cx,   cl
   1.484 +DECLARE_REG 4,  R8,  R8D,  R8W,  R8B
   1.485 +DECLARE_REG 5,  R9,  R9D,  R9W,  R9B
   1.486 +DECLARE_REG 6,  rax, eax,  ax,   al,   8
   1.487 +DECLARE_REG 7,  R10, R10D, R10W, R10B, 16
   1.488 +DECLARE_REG 8,  R11, R11D, R11W, R11B, 24
   1.489 +DECLARE_REG 9,  rbx, ebx,  bx,   bl,   32
   1.490 +DECLARE_REG 10, rbp, ebp,  bp,   bpl,  40
   1.491 +DECLARE_REG 11, R12, R12D, R12W, R12B, 48
   1.492 +DECLARE_REG 12, R13, R13D, R13W, R13B, 56
   1.493 +DECLARE_REG 13, R14, R14D, R14W, R14B, 64
   1.494 +DECLARE_REG 14, R15, R15D, R15W, R15B, 72
   1.495 +
   1.496 +%macro PROLOGUE 2-4+ ; #args, #regs, #xmm_regs, arg_names...
   1.497 +    %assign num_args %1
   1.498 +    %assign regs_used %2
   1.499 +    ASSERT regs_used >= num_args
   1.500 +    ASSERT regs_used <= 15
   1.501 +    PUSH_IF_USED 9, 10, 11, 12, 13, 14
   1.502 +    LOAD_IF_USED 6, 7, 8, 9, 10, 11, 12, 13, 14
   1.503 +    DEFINE_ARGS %4
   1.504 +%endmacro
   1.505 +
   1.506 +%macro RET 0
   1.507 +    POP_IF_USED 14, 13, 12, 11, 10, 9
   1.508 +    ret
   1.509 +%endmacro
   1.510 +
   1.511 +%macro REP_RET 0
   1.512 +    %if regs_used > 9
   1.513 +        RET
   1.514 +    %else
   1.515 +        rep ret
   1.516 +    %endif
   1.517 +%endmacro
   1.518 +
   1.519 +%else ; X86_32 ;==============================================================
   1.520 +
   1.521 +DECLARE_REG 0, eax, eax, ax, al,   4
   1.522 +DECLARE_REG 1, ecx, ecx, cx, cl,   8
   1.523 +DECLARE_REG 2, edx, edx, dx, dl,   12
   1.524 +DECLARE_REG 3, ebx, ebx, bx, bl,   16
   1.525 +DECLARE_REG 4, esi, esi, si, null, 20
   1.526 +DECLARE_REG 5, edi, edi, di, null, 24
   1.527 +DECLARE_REG 6, ebp, ebp, bp, null, 28
   1.528 +%define rsp esp
   1.529 +
   1.530 +%macro DECLARE_ARG 1-*
   1.531 +    %rep %0
   1.532 +        %define r%1m [esp + stack_offset + 4*%1 + 4]
   1.533 +        %define r%1mp dword r%1m
   1.534 +        %rotate 1
   1.535 +    %endrep
   1.536 +%endmacro
   1.537 +
   1.538 +DECLARE_ARG 7, 8, 9, 10, 11, 12, 13, 14
   1.539 +
   1.540 +%macro PROLOGUE 2-4+ ; #args, #regs, #xmm_regs, arg_names...
   1.541 +    %assign num_args %1
   1.542 +    %assign regs_used %2
   1.543 +    %if regs_used > 7
   1.544 +        %assign regs_used 7
   1.545 +    %endif
   1.546 +    ASSERT regs_used >= num_args
   1.547 +    PUSH_IF_USED 3, 4, 5, 6
   1.548 +    LOAD_IF_USED 0, 1, 2, 3, 4, 5, 6
   1.549 +    DEFINE_ARGS %4
   1.550 +%endmacro
   1.551 +
   1.552 +%macro RET 0
   1.553 +    POP_IF_USED 6, 5, 4, 3
   1.554 +    ret
   1.555 +%endmacro
   1.556 +
   1.557 +%macro REP_RET 0
   1.558 +    %if regs_used > 3
   1.559 +        RET
   1.560 +    %else
   1.561 +        rep ret
   1.562 +    %endif
   1.563 +%endmacro
   1.564 +
   1.565 +%endif ;======================================================================
   1.566 +
   1.567 +%if WIN64 == 0
   1.568 +%macro WIN64_SPILL_XMM 1
   1.569 +%endmacro
   1.570 +%macro WIN64_RESTORE_XMM 1
   1.571 +%endmacro
   1.572 +%endif
   1.573 +
   1.574 +;=============================================================================
   1.575 +; arch-independent part
   1.576 +;=============================================================================
   1.577 +
   1.578 +%assign function_align 16
   1.579 +
   1.580 +; Begin a function.
   1.581 +; Applies any symbol mangling needed for C linkage, and sets up a define such that
   1.582 +; subsequent uses of the function name automatically refer to the mangled version.
   1.583 +; Appends cpuflags to the function name if cpuflags has been specified.
   1.584 +%macro cglobal 1-2+ ; name, [PROLOGUE args]
   1.585 +%if %0 == 1
   1.586 +    cglobal_internal %1 %+ SUFFIX
   1.587 +%else
   1.588 +    cglobal_internal %1 %+ SUFFIX, %2
   1.589 +%endif
   1.590 +%endmacro
   1.591 +%macro cglobal_internal 1-2+
   1.592 +    %ifndef cglobaled_%1
   1.593 +        %xdefine %1 mangle(program_name %+ _ %+ %1)
   1.594 +        %xdefine %1.skip_prologue %1 %+ .skip_prologue
   1.595 +        CAT_XDEFINE cglobaled_, %1, 1
   1.596 +    %endif
   1.597 +    %xdefine current_function %1
   1.598 +    %ifidn __OUTPUT_FORMAT__,elf
   1.599 +        global %1:function hidden
   1.600 +    %elifidn __OUTPUT_FORMAT__,elf32
   1.601 +        global %1:function hidden
   1.602 +    %elifidn __OUTPUT_FORMAT__,elf64
   1.603 +        global %1:function hidden
   1.604 +    %elifidn __OUTPUT_FORMAT__,macho32
   1.605 +        global %1:private_extern
   1.606 +    %elifidn __OUTPUT_FORMAT__,macho64
   1.607 +        global %1:private_extern
   1.608 +    %else
   1.609 +        global %1
   1.610 +    %endif
   1.611 +    align function_align
   1.612 +    %1:
   1.613 +    RESET_MM_PERMUTATION ; not really needed, but makes disassembly somewhat nicer
   1.614 +    %assign stack_offset 0
   1.615 +    %if %0 > 1
   1.616 +        PROLOGUE %2
   1.617 +    %endif
   1.618 +%endmacro
   1.619 +
   1.620 +%macro cextern 1
   1.621 +    %xdefine %1 mangle(program_name %+ _ %+ %1)
   1.622 +    CAT_XDEFINE cglobaled_, %1, 1
   1.623 +    extern %1
   1.624 +%endmacro
   1.625 +
   1.626 +; like cextern, but without the prefix
   1.627 +%macro cextern_naked 1
   1.628 +    %xdefine %1 mangle(%1)
   1.629 +    CAT_XDEFINE cglobaled_, %1, 1
   1.630 +    extern %1
   1.631 +%endmacro
   1.632 +
   1.633 +%macro const 2+
   1.634 +    %xdefine %1 mangle(program_name %+ _ %+ %1)
   1.635 +    global %1
   1.636 +    %1: %2
   1.637 +%endmacro
   1.638 +
    1.639 +; This is needed for ELF; otherwise the GNU linker assumes the stack is
    1.640 +; executable by default.
   1.641 +%ifidn __OUTPUT_FORMAT__,elf
   1.642 +SECTION .note.GNU-stack noalloc noexec nowrite progbits
   1.643 +%elifidn __OUTPUT_FORMAT__,elf32
   1.644 +SECTION .note.GNU-stack noalloc noexec nowrite progbits
   1.645 +%elifidn __OUTPUT_FORMAT__,elf64
   1.646 +SECTION .note.GNU-stack noalloc noexec nowrite progbits
   1.647 +%endif
   1.648 +
   1.649 +; cpuflags
   1.650 +
   1.651 +%assign cpuflags_mmx      (1<<0)
   1.652 +%assign cpuflags_mmx2     (1<<1) | cpuflags_mmx
   1.653 +%assign cpuflags_3dnow    (1<<2) | cpuflags_mmx
   1.654 +%assign cpuflags_3dnow2   (1<<3) | cpuflags_3dnow
   1.655 +%assign cpuflags_sse      (1<<4) | cpuflags_mmx2
   1.656 +%assign cpuflags_sse2     (1<<5) | cpuflags_sse
   1.657 +%assign cpuflags_sse2slow (1<<6) | cpuflags_sse2
   1.658 +%assign cpuflags_sse3     (1<<7) | cpuflags_sse2
   1.659 +%assign cpuflags_ssse3    (1<<8) | cpuflags_sse3
   1.660 +%assign cpuflags_sse4     (1<<9) | cpuflags_ssse3
   1.661 +%assign cpuflags_sse42    (1<<10)| cpuflags_sse4
   1.662 +%assign cpuflags_avx      (1<<11)| cpuflags_sse42
   1.663 +%assign cpuflags_xop      (1<<12)| cpuflags_avx
   1.664 +%assign cpuflags_fma4     (1<<13)| cpuflags_avx
   1.665 +
   1.666 +%assign cpuflags_cache32  (1<<16)
   1.667 +%assign cpuflags_cache64  (1<<17)
   1.668 +%assign cpuflags_slowctz  (1<<18)
   1.669 +%assign cpuflags_lzcnt    (1<<19)
   1.670 +%assign cpuflags_misalign (1<<20)
   1.671 +%assign cpuflags_aligned  (1<<21) ; not a cpu feature, but a function variant
   1.672 +%assign cpuflags_atom     (1<<22)
   1.673 +
   1.674 +%define    cpuflag(x) ((cpuflags & (cpuflags_ %+ x)) == (cpuflags_ %+ x))
   1.675 +%define notcpuflag(x) ((cpuflags & (cpuflags_ %+ x)) != (cpuflags_ %+ x))
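          +
          +; e.g. an assemble-time feature test inside a function body (illustrative):
          +;     %if cpuflag(ssse3)
          +;         pshufb m0, m1
          +;     %else
          +;         ; sse2 fallback here
          +;     %endif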
   1.676 +
   1.677 +; Takes up to 2 cpuflags from the above list.
    1.678 +; All subsequent functions (up to the next INIT_CPUFLAGS) are built for the specified CPU.
    1.679 +; You shouldn't need to invoke this macro directly; it's a subroutine for INIT_MMX &co.
   1.680 +%macro INIT_CPUFLAGS 0-2
   1.681 +    %if %0 >= 1
   1.682 +        %xdefine cpuname %1
   1.683 +        %assign cpuflags cpuflags_%1
   1.684 +        %if %0 >= 2
   1.685 +            %xdefine cpuname %1_%2
   1.686 +            %assign cpuflags cpuflags | cpuflags_%2
   1.687 +        %endif
   1.688 +        %xdefine SUFFIX _ %+ cpuname
   1.689 +        %if cpuflag(avx)
   1.690 +            %assign avx_enabled 1
   1.691 +        %endif
   1.692 +        %if mmsize == 16 && notcpuflag(sse2)
   1.693 +            %define mova movaps
   1.694 +            %define movu movups
   1.695 +            %define movnta movntps
   1.696 +        %endif
   1.697 +        %if cpuflag(aligned)
   1.698 +            %define movu mova
   1.699 +        %elifidn %1, sse3
   1.700 +            %define movu lddqu
   1.701 +        %endif
   1.702 +    %else
   1.703 +        %xdefine SUFFIX
   1.704 +        %undef cpuname
   1.705 +        %undef cpuflags
   1.706 +    %endif
   1.707 +%endmacro
   1.708 +
   1.709 +; merge mmx and sse*
   1.710 +
   1.711 +%macro CAT_XDEFINE 3
   1.712 +    %xdefine %1%2 %3
   1.713 +%endmacro
   1.714 +
   1.715 +%macro CAT_UNDEF 2
   1.716 +    %undef %1%2
   1.717 +%endmacro
   1.718 +
   1.719 +%macro INIT_MMX 0-1+
   1.720 +    %assign avx_enabled 0
   1.721 +    %define RESET_MM_PERMUTATION INIT_MMX %1
   1.722 +    %define mmsize 8
   1.723 +    %define num_mmregs 8
   1.724 +    %define mova movq
   1.725 +    %define movu movq
   1.726 +    %define movh movd
   1.727 +    %define movnta movntq
   1.728 +    %assign %%i 0
   1.729 +    %rep 8
   1.730 +    CAT_XDEFINE m, %%i, mm %+ %%i
   1.731 +    CAT_XDEFINE nmm, %%i, %%i
   1.732 +    %assign %%i %%i+1
   1.733 +    %endrep
   1.734 +    %rep 8
   1.735 +    CAT_UNDEF m, %%i
   1.736 +    CAT_UNDEF nmm, %%i
   1.737 +    %assign %%i %%i+1
   1.738 +    %endrep
   1.739 +    INIT_CPUFLAGS %1
   1.740 +%endmacro
   1.741 +
   1.742 +%macro INIT_XMM 0-1+
   1.743 +    %assign avx_enabled 0
   1.744 +    %define RESET_MM_PERMUTATION INIT_XMM %1
   1.745 +    %define mmsize 16
   1.746 +    %define num_mmregs 8
   1.747 +    %if ARCH_X86_64
   1.748 +    %define num_mmregs 16
   1.749 +    %endif
   1.750 +    %define mova movdqa
   1.751 +    %define movu movdqu
   1.752 +    %define movh movq
   1.753 +    %define movnta movntdq
   1.754 +    %assign %%i 0
   1.755 +    %rep num_mmregs
   1.756 +    CAT_XDEFINE m, %%i, xmm %+ %%i
   1.757 +    CAT_XDEFINE nxmm, %%i, %%i
   1.758 +    %assign %%i %%i+1
   1.759 +    %endrep
   1.760 +    INIT_CPUFLAGS %1
   1.761 +%endmacro
   1.762 +
   1.763 +; FIXME: INIT_AVX can be replaced by INIT_XMM avx
   1.764 +%macro INIT_AVX 0
   1.765 +    INIT_XMM
   1.766 +    %assign avx_enabled 1
   1.767 +    %define PALIGNR PALIGNR_SSSE3
   1.768 +    %define RESET_MM_PERMUTATION INIT_AVX
   1.769 +%endmacro
   1.770 +
   1.771 +%macro INIT_YMM 0-1+
   1.772 +    %assign avx_enabled 1
   1.773 +    %define RESET_MM_PERMUTATION INIT_YMM %1
   1.774 +    %define mmsize 32
   1.775 +    %define num_mmregs 8
   1.776 +    %if ARCH_X86_64
   1.777 +    %define num_mmregs 16
   1.778 +    %endif
   1.779 +    %define mova vmovaps
   1.780 +    %define movu vmovups
   1.781 +    %undef movh
   1.782 +    %define movnta vmovntps
   1.783 +    %assign %%i 0
   1.784 +    %rep num_mmregs
   1.785 +    CAT_XDEFINE m, %%i, ymm %+ %%i
   1.786 +    CAT_XDEFINE nymm, %%i, %%i
   1.787 +    %assign %%i %%i+1
   1.788 +    %endrep
   1.789 +    INIT_CPUFLAGS %1
   1.790 +%endmacro
   1.791 +
   1.792 +INIT_XMM
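          +
          +; Illustrative multi-version build (the function name is hypothetical):
          +;     INIT_XMM sse2
          +;     cglobal foo, 2,2          ; assembles as vp9_foo_sse2 (program_name + SUFFIX)
          +;     INIT_XMM avx
          +;     cglobal foo, 2,2          ; assembles as vp9_foo_avx, with avx_enabled set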
   1.793 +
   1.794 +; I often want to use macros that permute their arguments. e.g. there's no
   1.795 +; efficient way to implement butterfly or transpose or dct without swapping some
   1.796 +; arguments.
   1.797 +;
   1.798 +; I would like to not have to manually keep track of the permutations:
   1.799 +; If I insert a permutation in the middle of a function, it should automatically
   1.800 +; change everything that follows. For more complex macros I may also have multiple
   1.801 +; implementations, e.g. the SSE2 and SSSE3 versions may have different permutations.
   1.802 +;
   1.803 +; Hence these macros. Insert a PERMUTE or some SWAPs at the end of a macro that
   1.804 +; permutes its arguments. It's equivalent to exchanging the contents of the
   1.805 +; registers, except that this way you exchange the register names instead, so it
   1.806 +; doesn't cost any cycles.
   1.807 +
   1.808 +%macro PERMUTE 2-* ; takes a list of pairs to swap
   1.809 +%rep %0/2
   1.810 +    %xdefine tmp%2 m%2
   1.811 +    %xdefine ntmp%2 nm%2
   1.812 +    %rotate 2
   1.813 +%endrep
   1.814 +%rep %0/2
   1.815 +    %xdefine m%1 tmp%2
   1.816 +    %xdefine nm%1 ntmp%2
   1.817 +    %undef tmp%2
   1.818 +    %undef ntmp%2
   1.819 +    %rotate 2
   1.820 +%endrep
   1.821 +%endmacro
   1.822 +
   1.823 +%macro SWAP 2-* ; swaps a single chain (sometimes more concise than pairs)
   1.824 +%rep %0-1
   1.825 +%ifdef m%1
   1.826 +    %xdefine tmp m%1
   1.827 +    %xdefine m%1 m%2
   1.828 +    %xdefine m%2 tmp
   1.829 +    CAT_XDEFINE n, m%1, %1
   1.830 +    CAT_XDEFINE n, m%2, %2
   1.831 +%else
    1.832 +    ; If we were called as "SWAP m0,m1" rather than "SWAP 0,1", infer the original numbers here.
   1.833 +    ; Be careful using this mode in nested macros though, as in some cases there may be
   1.834 +    ; other copies of m# that have already been dereferenced and don't get updated correctly.
   1.835 +    %xdefine %%n1 n %+ %1
   1.836 +    %xdefine %%n2 n %+ %2
   1.837 +    %xdefine tmp m %+ %%n1
   1.838 +    CAT_XDEFINE m, %%n1, m %+ %%n2
   1.839 +    CAT_XDEFINE m, %%n2, tmp
   1.840 +    CAT_XDEFINE n, m %+ %%n1, %%n1
   1.841 +    CAT_XDEFINE n, m %+ %%n2, %%n2
   1.842 +%endif
   1.843 +    %undef tmp
   1.844 +    %rotate 1
   1.845 +%endrep
   1.846 +%endmacro
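          +
          +; e.g. to "return" a result in m0 without a register-register move, rename
          +; the registers instead of copying them (illustrative):
          +;     SWAP 0, 2                 ; m0 <-> m2; emits no instructions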
   1.847 +
   1.848 +; If SAVE_MM_PERMUTATION is placed at the end of a function, then any later
   1.849 +; calls to that function will automatically load the permutation, so values can
   1.850 +; be returned in mmregs.
   1.851 +%macro SAVE_MM_PERMUTATION 0-1
   1.852 +    %if %0
   1.853 +        %xdefine %%f %1_m
   1.854 +    %else
   1.855 +        %xdefine %%f current_function %+ _m
   1.856 +    %endif
   1.857 +    %assign %%i 0
   1.858 +    %rep num_mmregs
   1.859 +        CAT_XDEFINE %%f, %%i, m %+ %%i
   1.860 +    %assign %%i %%i+1
   1.861 +    %endrep
   1.862 +%endmacro
   1.863 +
   1.864 +%macro LOAD_MM_PERMUTATION 1 ; name to load from
   1.865 +    %ifdef %1_m0
   1.866 +        %assign %%i 0
   1.867 +        %rep num_mmregs
   1.868 +            CAT_XDEFINE m, %%i, %1_m %+ %%i
   1.869 +            CAT_XDEFINE n, m %+ %%i, %%i
   1.870 +        %assign %%i %%i+1
   1.871 +        %endrep
   1.872 +    %endif
   1.873 +%endmacro
   1.874 +
   1.875 +; Append cpuflags to the callee's name iff the appended name is known and the plain name isn't
   1.876 +%macro call 1
   1.877 +    call_internal %1, %1 %+ SUFFIX
   1.878 +%endmacro
   1.879 +%macro call_internal 2
   1.880 +    %xdefine %%i %1
   1.881 +    %ifndef cglobaled_%1
   1.882 +        %ifdef cglobaled_%2
   1.883 +            %xdefine %%i %2
   1.884 +        %endif
   1.885 +    %endif
   1.886 +    call %%i
   1.887 +    LOAD_MM_PERMUTATION %%i
   1.888 +%endmacro
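          +
          +; Illustrative pairing (the helper name is hypothetical): a function that
          +; ends with
          +;     SAVE_MM_PERMUTATION
          +;     RET
          +; can be invoked as "call helper", and the caller's m# names are then
          +; remapped to wherever the helper left its results.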
   1.889 +
   1.890 +; Substitutions that reduce instruction size but are functionally equivalent
   1.891 +%macro add 2
   1.892 +    %ifnum %2
   1.893 +        %if %2==128
   1.894 +            sub %1, -128
   1.895 +        %else
   1.896 +            add %1, %2
   1.897 +        %endif
   1.898 +    %else
   1.899 +        add %1, %2
   1.900 +    %endif
   1.901 +%endmacro
   1.902 +
   1.903 +%macro sub 2
   1.904 +    %ifnum %2
   1.905 +        %if %2==128
   1.906 +            add %1, -128
   1.907 +        %else
   1.908 +            sub %1, %2
   1.909 +        %endif
   1.910 +    %else
   1.911 +        sub %1, %2
   1.912 +    %endif
   1.913 +%endmacro
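          +
          +; e.g. "add r0, 128" is emitted as "sub r0, -128", which encodes as a
          +; sign-extended imm8 instead of an imm32.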
   1.914 +
   1.915 +;=============================================================================
   1.916 +; AVX abstraction layer
   1.917 +;=============================================================================
   1.918 +
   1.919 +%assign i 0
   1.920 +%rep 16
   1.921 +    %if i < 8
   1.922 +        CAT_XDEFINE sizeofmm, i, 8
   1.923 +    %endif
   1.924 +    CAT_XDEFINE sizeofxmm, i, 16
   1.925 +    CAT_XDEFINE sizeofymm, i, 32
   1.926 +%assign i i+1
   1.927 +%endrep
   1.928 +%undef i
   1.929 +
   1.930 +;%1 == instruction
   1.931 +;%2 == 1 if float, 0 if int
   1.932 +;%3 == 1 if 4-operand (xmm, xmm, xmm, imm), 0 if 2- or 3-operand (xmm, xmm, xmm)
   1.933 +;%4 == number of operands given
   1.934 +;%5+: operands
   1.935 +%macro RUN_AVX_INSTR 6-7+
   1.936 +    %ifid %5
   1.937 +        %define %%size sizeof%5
   1.938 +    %else
   1.939 +        %define %%size mmsize
   1.940 +    %endif
   1.941 +    %if %%size==32
   1.942 +        %if %0 >= 7
   1.943 +            v%1 %5, %6, %7
   1.944 +        %else
   1.945 +            v%1 %5, %6
   1.946 +        %endif
   1.947 +    %else
   1.948 +        %if %%size==8
   1.949 +            %define %%regmov movq
   1.950 +        %elif %2
   1.951 +            %define %%regmov movaps
   1.952 +        %else
   1.953 +            %define %%regmov movdqa
   1.954 +        %endif
   1.955 +
   1.956 +        %if %4>=3+%3
   1.957 +            %ifnidn %5, %6
   1.958 +                %if avx_enabled && sizeof%5==16
   1.959 +                    v%1 %5, %6, %7
   1.960 +                %else
   1.961 +                    %%regmov %5, %6
   1.962 +                    %1 %5, %7
   1.963 +                %endif
   1.964 +            %else
   1.965 +                %1 %5, %7
   1.966 +            %endif
   1.967 +        %elif %3
   1.968 +            %1 %5, %6, %7
   1.969 +        %else
   1.970 +            %1 %5, %6
   1.971 +        %endif
   1.972 +    %endif
   1.973 +%endmacro
   1.974 +
   1.975 +; 3arg AVX ops with a memory arg can only have it in src2,
   1.976 +; whereas SSE emulation of 3arg prefers to have it in src1 (i.e. the mov).
   1.977 +; So, if the op is symmetric and the wrong one is memory, swap them.
   1.978 +%macro RUN_AVX_INSTR1 8
   1.979 +    %assign %%swap 0
   1.980 +    %if avx_enabled
   1.981 +        %ifnid %6
   1.982 +            %assign %%swap 1
   1.983 +        %endif
   1.984 +    %elifnidn %5, %6
   1.985 +        %ifnid %7
   1.986 +            %assign %%swap 1
   1.987 +        %endif
   1.988 +    %endif
   1.989 +    %if %%swap && %3 == 0 && %8 == 1
   1.990 +        RUN_AVX_INSTR %1, %2, %3, %4, %5, %7, %6
   1.991 +    %else
   1.992 +        RUN_AVX_INSTR %1, %2, %3, %4, %5, %6, %7
   1.993 +    %endif
   1.994 +%endmacro
   1.995 +
   1.996 +;%1 == instruction
   1.997 +;%2 == 1 if float, 0 if int
   1.998 +;%3 == 1 if 4-operand (xmm, xmm, xmm, imm), 0 if 3-operand (xmm, xmm, xmm)
   1.999 +;%4 == 1 if symmetric (i.e. doesn't matter which src arg is which), 0 if not
  1.1000 +%macro AVX_INSTR 4
  1.1001 +    %macro %1 2-9 fnord, fnord, fnord, %1, %2, %3, %4
  1.1002 +        %ifidn %3, fnord
  1.1003 +            RUN_AVX_INSTR %6, %7, %8, 2, %1, %2
  1.1004 +        %elifidn %4, fnord
  1.1005 +            RUN_AVX_INSTR1 %6, %7, %8, 3, %1, %2, %3, %9
  1.1006 +        %elifidn %5, fnord
  1.1007 +            RUN_AVX_INSTR %6, %7, %8, 4, %1, %2, %3, %4
  1.1008 +        %else
  1.1009 +            RUN_AVX_INSTR %6, %7, %8, 5, %1, %2, %3, %4, %5
  1.1010 +        %endif
  1.1011 +    %endmacro
  1.1012 +%endmacro
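          +
          +; With the wrappers below in place, 3-operand syntax assembles everywhere
          +; (illustrative):
          +;     paddw m0, m1, m2          ; AVX: vpaddw m0, m1, m2
          +;                               ; pre-AVX: movdqa m0, m1 + paddw m0, m2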
  1.1013 +
  1.1014 +AVX_INSTR addpd, 1, 0, 1
  1.1015 +AVX_INSTR addps, 1, 0, 1
  1.1016 +AVX_INSTR addsd, 1, 0, 1
  1.1017 +AVX_INSTR addss, 1, 0, 1
  1.1018 +AVX_INSTR addsubpd, 1, 0, 0
  1.1019 +AVX_INSTR addsubps, 1, 0, 0
  1.1020 +AVX_INSTR andpd, 1, 0, 1
  1.1021 +AVX_INSTR andps, 1, 0, 1
  1.1022 +AVX_INSTR andnpd, 1, 0, 0
  1.1023 +AVX_INSTR andnps, 1, 0, 0
  1.1024 +AVX_INSTR blendpd, 1, 0, 0
  1.1025 +AVX_INSTR blendps, 1, 0, 0
  1.1026 +AVX_INSTR blendvpd, 1, 0, 0
  1.1027 +AVX_INSTR blendvps, 1, 0, 0
  1.1028 +AVX_INSTR cmppd, 1, 0, 0
  1.1029 +AVX_INSTR cmpps, 1, 0, 0
  1.1030 +AVX_INSTR cmpsd, 1, 0, 0
  1.1031 +AVX_INSTR cmpss, 1, 0, 0
  1.1032 +AVX_INSTR cvtdq2ps, 1, 0, 0
  1.1033 +AVX_INSTR cvtps2dq, 1, 0, 0
  1.1034 +AVX_INSTR divpd, 1, 0, 0
  1.1035 +AVX_INSTR divps, 1, 0, 0
  1.1036 +AVX_INSTR divsd, 1, 0, 0
  1.1037 +AVX_INSTR divss, 1, 0, 0
  1.1038 +AVX_INSTR dppd, 1, 1, 0
  1.1039 +AVX_INSTR dpps, 1, 1, 0
  1.1040 +AVX_INSTR haddpd, 1, 0, 0
  1.1041 +AVX_INSTR haddps, 1, 0, 0
  1.1042 +AVX_INSTR hsubpd, 1, 0, 0
  1.1043 +AVX_INSTR hsubps, 1, 0, 0
  1.1044 +AVX_INSTR maxpd, 1, 0, 1
  1.1045 +AVX_INSTR maxps, 1, 0, 1
  1.1046 +AVX_INSTR maxsd, 1, 0, 1
  1.1047 +AVX_INSTR maxss, 1, 0, 1
  1.1048 +AVX_INSTR minpd, 1, 0, 1
  1.1049 +AVX_INSTR minps, 1, 0, 1
  1.1050 +AVX_INSTR minsd, 1, 0, 1
  1.1051 +AVX_INSTR minss, 1, 0, 1
  1.1052 +AVX_INSTR movhlps, 1, 0, 0
  1.1053 +AVX_INSTR movlhps, 1, 0, 0
  1.1054 +AVX_INSTR movsd, 1, 0, 0
  1.1055 +AVX_INSTR movss, 1, 0, 0
  1.1056 +AVX_INSTR mpsadbw, 0, 1, 0
  1.1057 +AVX_INSTR mulpd, 1, 0, 1
  1.1058 +AVX_INSTR mulps, 1, 0, 1
  1.1059 +AVX_INSTR mulsd, 1, 0, 1
  1.1060 +AVX_INSTR mulss, 1, 0, 1
  1.1061 +AVX_INSTR orpd, 1, 0, 1
  1.1062 +AVX_INSTR orps, 1, 0, 1
  1.1063 +AVX_INSTR packsswb, 0, 0, 0
  1.1064 +AVX_INSTR packssdw, 0, 0, 0
  1.1065 +AVX_INSTR packuswb, 0, 0, 0
  1.1066 +AVX_INSTR packusdw, 0, 0, 0
  1.1067 +AVX_INSTR paddb, 0, 0, 1
  1.1068 +AVX_INSTR paddw, 0, 0, 1
  1.1069 +AVX_INSTR paddd, 0, 0, 1
  1.1070 +AVX_INSTR paddq, 0, 0, 1
  1.1071 +AVX_INSTR paddsb, 0, 0, 1
  1.1072 +AVX_INSTR paddsw, 0, 0, 1
  1.1073 +AVX_INSTR paddusb, 0, 0, 1
  1.1074 +AVX_INSTR paddusw, 0, 0, 1
  1.1075 +AVX_INSTR palignr, 0, 1, 0
  1.1076 +AVX_INSTR pand, 0, 0, 1
  1.1077 +AVX_INSTR pandn, 0, 0, 0
  1.1078 +AVX_INSTR pavgb, 0, 0, 1
  1.1079 +AVX_INSTR pavgw, 0, 0, 1
  1.1080 +AVX_INSTR pblendvb, 0, 0, 0
  1.1081 +AVX_INSTR pblendw, 0, 1, 0
  1.1082 +AVX_INSTR pcmpestri, 0, 0, 0
  1.1083 +AVX_INSTR pcmpestrm, 0, 0, 0
  1.1084 +AVX_INSTR pcmpistri, 0, 0, 0
  1.1085 +AVX_INSTR pcmpistrm, 0, 0, 0
  1.1086 +AVX_INSTR pcmpeqb, 0, 0, 1
  1.1087 +AVX_INSTR pcmpeqw, 0, 0, 1
  1.1088 +AVX_INSTR pcmpeqd, 0, 0, 1
  1.1089 +AVX_INSTR pcmpeqq, 0, 0, 1
  1.1090 +AVX_INSTR pcmpgtb, 0, 0, 0
  1.1091 +AVX_INSTR pcmpgtw, 0, 0, 0
  1.1092 +AVX_INSTR pcmpgtd, 0, 0, 0
  1.1093 +AVX_INSTR pcmpgtq, 0, 0, 0
  1.1094 +AVX_INSTR phaddw, 0, 0, 0
  1.1095 +AVX_INSTR phaddd, 0, 0, 0
  1.1096 +AVX_INSTR phaddsw, 0, 0, 0
  1.1097 +AVX_INSTR phsubw, 0, 0, 0
  1.1098 +AVX_INSTR phsubd, 0, 0, 0
  1.1099 +AVX_INSTR phsubsw, 0, 0, 0
  1.1100 +AVX_INSTR pmaddwd, 0, 0, 1
  1.1101 +AVX_INSTR pmaddubsw, 0, 0, 0
  1.1102 +AVX_INSTR pmaxsb, 0, 0, 1
  1.1103 +AVX_INSTR pmaxsw, 0, 0, 1
  1.1104 +AVX_INSTR pmaxsd, 0, 0, 1
  1.1105 +AVX_INSTR pmaxub, 0, 0, 1
  1.1106 +AVX_INSTR pmaxuw, 0, 0, 1
  1.1107 +AVX_INSTR pmaxud, 0, 0, 1
  1.1108 +AVX_INSTR pminsb, 0, 0, 1
  1.1109 +AVX_INSTR pminsw, 0, 0, 1
  1.1110 +AVX_INSTR pminsd, 0, 0, 1
  1.1111 +AVX_INSTR pminub, 0, 0, 1
  1.1112 +AVX_INSTR pminuw, 0, 0, 1
  1.1113 +AVX_INSTR pminud, 0, 0, 1
  1.1114 +AVX_INSTR pmulhuw, 0, 0, 1
  1.1115 +AVX_INSTR pmulhrsw, 0, 0, 1
  1.1116 +AVX_INSTR pmulhw, 0, 0, 1
  1.1117 +AVX_INSTR pmullw, 0, 0, 1
  1.1118 +AVX_INSTR pmulld, 0, 0, 1
  1.1119 +AVX_INSTR pmuludq, 0, 0, 1
  1.1120 +AVX_INSTR pmuldq, 0, 0, 1
  1.1121 +AVX_INSTR por, 0, 0, 1
  1.1122 +AVX_INSTR psadbw, 0, 0, 1
  1.1123 +AVX_INSTR pshufb, 0, 0, 0
  1.1124 +AVX_INSTR psignb, 0, 0, 0
  1.1125 +AVX_INSTR psignw, 0, 0, 0
  1.1126 +AVX_INSTR psignd, 0, 0, 0
  1.1127 +AVX_INSTR psllw, 0, 0, 0
  1.1128 +AVX_INSTR pslld, 0, 0, 0
  1.1129 +AVX_INSTR psllq, 0, 0, 0
  1.1130 +AVX_INSTR pslldq, 0, 0, 0
  1.1131 +AVX_INSTR psraw, 0, 0, 0
  1.1132 +AVX_INSTR psrad, 0, 0, 0
  1.1133 +AVX_INSTR psrlw, 0, 0, 0
  1.1134 +AVX_INSTR psrld, 0, 0, 0
  1.1135 +AVX_INSTR psrlq, 0, 0, 0
  1.1136 +AVX_INSTR psrldq, 0, 0, 0
  1.1137 +AVX_INSTR psubb, 0, 0, 0
  1.1138 +AVX_INSTR psubw, 0, 0, 0
  1.1139 +AVX_INSTR psubd, 0, 0, 0
  1.1140 +AVX_INSTR psubq, 0, 0, 0
  1.1141 +AVX_INSTR psubsb, 0, 0, 0
  1.1142 +AVX_INSTR psubsw, 0, 0, 0
  1.1143 +AVX_INSTR psubusb, 0, 0, 0
  1.1144 +AVX_INSTR psubusw, 0, 0, 0
  1.1145 +AVX_INSTR punpckhbw, 0, 0, 0
  1.1146 +AVX_INSTR punpckhwd, 0, 0, 0
  1.1147 +AVX_INSTR punpckhdq, 0, 0, 0
  1.1148 +AVX_INSTR punpckhqdq, 0, 0, 0
  1.1149 +AVX_INSTR punpcklbw, 0, 0, 0
  1.1150 +AVX_INSTR punpcklwd, 0, 0, 0
  1.1151 +AVX_INSTR punpckldq, 0, 0, 0
  1.1152 +AVX_INSTR punpcklqdq, 0, 0, 0
  1.1153 +AVX_INSTR pxor, 0, 0, 1
  1.1154 +AVX_INSTR shufps, 1, 1, 0
  1.1155 +AVX_INSTR subpd, 1, 0, 0
  1.1156 +AVX_INSTR subps, 1, 0, 0
  1.1157 +AVX_INSTR subsd, 1, 0, 0
  1.1158 +AVX_INSTR subss, 1, 0, 0
  1.1159 +AVX_INSTR unpckhpd, 1, 0, 0
  1.1160 +AVX_INSTR unpckhps, 1, 0, 0
  1.1161 +AVX_INSTR unpcklpd, 1, 0, 0
  1.1162 +AVX_INSTR unpcklps, 1, 0, 0
  1.1163 +AVX_INSTR xorpd, 1, 0, 1
  1.1164 +AVX_INSTR xorps, 1, 0, 1
  1.1165 +
   1.1166 +; 3DNow! instructions, for sharing code between AVX, SSE and 3DNow!
  1.1167 +AVX_INSTR pfadd, 1, 0, 1
  1.1168 +AVX_INSTR pfsub, 1, 0, 0
  1.1169 +AVX_INSTR pfmul, 1, 0, 1
  1.1170 +
  1.1171 +; base-4 constants for shuffles
  1.1172 +%assign i 0
  1.1173 +%rep 256
  1.1174 +    %assign j ((i>>6)&3)*1000 + ((i>>4)&3)*100 + ((i>>2)&3)*10 + (i&3)
  1.1175 +    %if j < 10
  1.1176 +        CAT_XDEFINE q000, j, i
  1.1177 +    %elif j < 100
  1.1178 +        CAT_XDEFINE q00, j, i
  1.1179 +    %elif j < 1000
  1.1180 +        CAT_XDEFINE q0, j, i
  1.1181 +    %else
  1.1182 +        CAT_XDEFINE q, j, i
  1.1183 +    %endif
  1.1184 +%assign i i+1
  1.1185 +%endrep
  1.1186 +%undef i
  1.1187 +%undef j
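          +
          +; e.g. q3120 expands to 0xD8, so (illustrative)
          +;     pshufd m0, m1, q3120
          +; writes source dwords 0,2,1,3 into destination dwords 0..3.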
  1.1188 +
  1.1189 +%macro FMA_INSTR 3
  1.1190 +    %macro %1 4-7 %1, %2, %3
  1.1191 +        %if cpuflag(xop)
  1.1192 +            v%5 %1, %2, %3, %4
  1.1193 +        %else
  1.1194 +            %6 %1, %2, %3
  1.1195 +            %7 %1, %4
  1.1196 +        %endif
  1.1197 +    %endmacro
  1.1198 +%endmacro
  1.1199 +
  1.1200 +FMA_INSTR  pmacsdd,  pmulld, paddd
  1.1201 +FMA_INSTR  pmacsww,  pmullw, paddw
  1.1202 +FMA_INSTR pmadcswd, pmaddwd, paddd
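          +
          +; e.g. (illustrative) "pmacsdd m0, m1, m2, m3" emits vpmacsdd on XOP-capable
          +; CPUs; otherwise it falls back to "pmulld m0, m1, m2" (3-operand form via
          +; the AVX_INSTR wrapper) followed by "paddd m0, m3".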
