mozjpeg/simd/i386/jfdctflt-sse.asm
Commit 19c791cdac by DRC: Improve code formatting consistency
With rare exceptions ... (a short C sketch illustrating these conventions follows the list.)
- Always separate line continuation characters by one space from
  preceding code.
- Always use two-space indentation.  Never use tabs.
- Always use K&R-style conditional blocks.
- Always surround operators with spaces, except in raw assembly code.
- Always put a space after, but not before, a comma.
- Never put a space between type casts and variables/function calls.
- Never put a space between the function name and the argument list in
  function declarations and prototypes.
- Always surround braces ('{' and '}') with spaces.
- Always surround statements (if, for, else, catch, while, do, switch)
  with spaces.
- Always attach pointer symbols ('*' and '**') to the variable or
  function name.
- Always precede pointer symbols ('*' and '**') by a space in type
  casts.
- Use the MIN() macro from jpegint.h within the libjpeg and TurboJPEG
  API libraries (using min() from tjutil.h is still necessary for
  TJBench.)
- Where it makes sense (particularly in the TurboJPEG code), put a blank
  line after variable declaration blocks.
- Always separate statements in one-liners by two spaces.
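
To make the conventions above concrete, here is a minimal, purely
illustrative C sketch (not taken from the code base) showing the
two-space indentation, K&R braces, spacing around operators and after
commas, attached pointer symbols, and the blank line after the variable
declaration block.  In type casts to pointer types the '*' is preceded
by a space, e.g. (FAST_FLOAT *)data, as in the prototype comment further
below:

  static double
  scaled_sum(const int *values, int count, double scale)
  {
    double sum = 0.0;
    int i;

    for (i = 0; i < count; i++) {
      if (values[i] > 0)
        sum += (double)values[i] * scale;
    }

    return sum;
  }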

The purpose of this was to ease maintenance on my part and also to make
it easier for contributors to figure out how to format patch
submissions.  This was admittedly confusing (even to me sometimes) when
we had 3 or 4 different style conventions in the same source tree.  The
new convention is more consistent with the formatting of other OSS code
bases.

This commit corrects deviations from the chosen formatting style in the
libjpeg API code and reformats the TurboJPEG API code such that it
conforms to the same standard.

NOTES:
- Although it is no longer necessary for the function name in function
  declarations to begin in Column 1 (this was historically necessary
  because of the ansi2knr utility, which allowed libjpeg to be built
  with non-ANSI compilers), we retain that formatting for the libjpeg
  code because it improves readability when using libjpeg's function
  attribute macros (GLOBAL(), etc.)  A brief sketch follows these notes.
- This reformatting project was accomplished with the help of AStyle and
  Uncrustify, although neither was completely up to the task, and thus
  a great deal of manual tweaking was required.  Note to developers of
  code formatting utilities:  the libjpeg-turbo code base is an
  excellent test bed, because AFAICT, it breaks every single one of the
  utilities that are currently available.
- The legacy (MMX, SSE, 3DNow!) assembly code for i386 has been
  formatted to match the SSE2 code (refer to
  ff5685d5344273df321eb63a005eaae19d2496e3.)  I hadn't intended to
  bother with this, but the Loongson MMI implementation demonstrated
  that there is still academic value to the MMX implementation, as an
  algorithmic model for other 64-bit vector implementations.  Thus, it
  is desirable to improve its readability in the same manner as that of
  the SSE2 implementation.
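
As an illustration of the first note, a hypothetical definition in the
retained libjpeg style might look as follows: the attribute macro sits on
its own line and the function name starts in column 1.  GLOBAL() and
FAST_FLOAT are assumed to come from the libjpeg headers, and the
prototype matches the commented one in the file below:

  GLOBAL(void)
  jsimd_fdct_float_sse(FAST_FLOAT *data)
  {
    /* ... forward DCT implementation ... */
  }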
Committed 2018-03-16 02:14:34 -05:00


;
; jfdctflt.asm - floating-point FDCT (SSE)
;
; Copyright 2009 Pierre Ossman <ossman@cendio.se> for Cendio AB
; Copyright (C) 2016, D. R. Commander.
;
; Based on the x86 SIMD extension for IJG JPEG library
; Copyright (C) 1999-2006, MIYASAKA Masaru.
; For conditions of distribution and use, see copyright notice in jsimdext.inc
;
; This file should be assembled with NASM (Netwide Assembler),
; can *not* be assembled with Microsoft's MASM or any compatible
; assembler (including Borland's Turbo Assembler).
; NASM is available from http://nasm.sourceforge.net/ or
; http://sourceforge.net/project/showfiles.php?group_id=6208
;
; This file contains a floating-point implementation of the forward DCT
; (Discrete Cosine Transform). The following code is based directly on
; the IJG's original jfdctflt.c; see the jfdctflt.c for more details.
;
; [TAB8]
%include "jsimdext.inc"
%include "jdct.inc"
; --------------------------------------------------------------------------
%macro unpcklps2 2 ; %1=(0 1 2 3) / %2=(4 5 6 7) => %1=(0 1 4 5)
shufps %1, %2, 0x44
%endmacro
%macro unpckhps2 2 ; %1=(0 1 2 3) / %2=(4 5 6 7) => %1=(2 3 6 7)
shufps %1, %2, 0xEE
%endmacro
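; (shufps selects its two low result elements from the destination and its
;  two high result elements from the source: immediate 0x44 = 01 00 01 00b
;  yields elements 0-1 of each operand, and 0xEE = 11 10 11 10b yields
;  elements 2-3 of each.  These macros therefore merge the low or high
;  64-bit halves of two registers, complementing unpcklps/unpckhps in the
;  4x4 transposes below.)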
; --------------------------------------------------------------------------
SECTION SEG_CONST
alignz 32
GLOBAL_DATA(jconst_fdct_float_sse)
EXTN(jconst_fdct_float_sse):
PD_0_382 times 4 dd 0.382683432365089771728460
PD_0_707 times 4 dd 0.707106781186547524400844
PD_0_541 times 4 dd 0.541196100146196984399723
PD_1_306 times 4 dd 1.306562964876376527856643
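; (Per the comments in jfdctflt.c, with cN = cos(N*pi/16):
;  0.382683433 = c6, 0.707106781 = c4, 0.541196100 = c2 - c6, and
;  1.306562965 = c2 + c6.)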
alignz 32
; --------------------------------------------------------------------------
SECTION SEG_TEXT
BITS 32
;
; Perform the forward DCT on one block of samples.
;
; GLOBAL(void)
; jsimd_fdct_float_sse(FAST_FLOAT *data)
;
%define data(b) (b) + 8 ; FAST_FLOAT *data
%define original_ebp ebp + 0
%define wk(i) ebp - (WK_NUM - (i)) * SIZEOF_XMMWORD
; xmmword wk[WK_NUM]
%define WK_NUM 2
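; (data(b) addresses the routine's single stack argument at [b + 8], where b
;  holds the value a conventional frame pointer would have; the prologue below
;  saves that value in eax.  wk(0) and wk(1) are two xmmword temporaries placed
;  just below the realigned frame pointer; because the prologue aligns esp to
;  SIZEOF_XMMWORD, movaps can be used on them.)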
align 32
GLOBAL_FUNCTION(jsimd_fdct_float_sse)
EXTN(jsimd_fdct_float_sse):
push ebp
mov eax, esp ; eax = original ebp
sub esp, byte 4
and esp, byte (-SIZEOF_XMMWORD) ; align to 128 bits
mov [esp], eax
mov ebp, esp ; ebp = aligned ebp
lea esp, [wk(0)]
pushpic ebx
; push ecx ; need not be preserved
; push edx ; need not be preserved
; push esi ; unused
; push edi ; unused
get_GOT ebx ; get GOT address
; ---- Pass 1: process rows.
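; (Each iteration of .rowloop handles four of the eight rows: two 4x4 tiles
;  are loaded and transposed with unpcklps/unpckhps/unpcklps2/unpckhps2 so
;  that each xmm register holds the same element position from four rows,
;  letting the 1-D FDCT below process four rows at once.  DCTSIZE/4 = 2
;  iterations cover the whole block.)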
mov edx, POINTER [data(eax)] ; (FAST_FLOAT *)
mov ecx, DCTSIZE/4
alignx 16, 7
.rowloop:
movaps xmm0, XMMWORD [XMMBLOCK(2,0,edx,SIZEOF_FAST_FLOAT)]
movaps xmm1, XMMWORD [XMMBLOCK(3,0,edx,SIZEOF_FAST_FLOAT)]
movaps xmm2, XMMWORD [XMMBLOCK(2,1,edx,SIZEOF_FAST_FLOAT)]
movaps xmm3, XMMWORD [XMMBLOCK(3,1,edx,SIZEOF_FAST_FLOAT)]
; xmm0=(20 21 22 23), xmm2=(24 25 26 27)
; xmm1=(30 31 32 33), xmm3=(34 35 36 37)
movaps xmm4, xmm0 ; transpose coefficients(phase 1)
unpcklps xmm0, xmm1 ; xmm0=(20 30 21 31)
unpckhps xmm4, xmm1 ; xmm4=(22 32 23 33)
movaps xmm5, xmm2 ; transpose coefficients(phase 1)
unpcklps xmm2, xmm3 ; xmm2=(24 34 25 35)
unpckhps xmm5, xmm3 ; xmm5=(26 36 27 37)
movaps xmm6, XMMWORD [XMMBLOCK(0,0,edx,SIZEOF_FAST_FLOAT)]
movaps xmm7, XMMWORD [XMMBLOCK(1,0,edx,SIZEOF_FAST_FLOAT)]
movaps xmm1, XMMWORD [XMMBLOCK(0,1,edx,SIZEOF_FAST_FLOAT)]
movaps xmm3, XMMWORD [XMMBLOCK(1,1,edx,SIZEOF_FAST_FLOAT)]
; xmm6=(00 01 02 03), xmm1=(04 05 06 07)
; xmm7=(10 11 12 13), xmm3=(14 15 16 17)
movaps XMMWORD [wk(0)], xmm4 ; wk(0)=(22 32 23 33)
movaps XMMWORD [wk(1)], xmm2 ; wk(1)=(24 34 25 35)
movaps xmm4, xmm6 ; transpose coefficients(phase 1)
unpcklps xmm6, xmm7 ; xmm6=(00 10 01 11)
unpckhps xmm4, xmm7 ; xmm4=(02 12 03 13)
movaps xmm2, xmm1 ; transpose coefficients(phase 1)
unpcklps xmm1, xmm3 ; xmm1=(04 14 05 15)
unpckhps xmm2, xmm3 ; xmm2=(06 16 07 17)
movaps xmm7, xmm6 ; transpose coefficients(phase 2)
unpcklps2 xmm6, xmm0 ; xmm6=(00 10 20 30)=data0
unpckhps2 xmm7, xmm0 ; xmm7=(01 11 21 31)=data1
movaps xmm3, xmm2 ; transpose coefficients(phase 2)
unpcklps2 xmm2, xmm5 ; xmm2=(06 16 26 36)=data6
unpckhps2 xmm3, xmm5 ; xmm3=(07 17 27 37)=data7
movaps xmm0, xmm7
movaps xmm5, xmm6
subps xmm7, xmm2 ; xmm7=data1-data6=tmp6
subps xmm6, xmm3 ; xmm6=data0-data7=tmp7
addps xmm0, xmm2 ; xmm0=data1+data6=tmp1
addps xmm5, xmm3 ; xmm5=data0+data7=tmp0
movaps xmm2, XMMWORD [wk(0)] ; xmm2=(22 32 23 33)
movaps xmm3, XMMWORD [wk(1)] ; xmm3=(24 34 25 35)
movaps XMMWORD [wk(0)], xmm7 ; wk(0)=tmp6
movaps XMMWORD [wk(1)], xmm6 ; wk(1)=tmp7
movaps xmm7, xmm4 ; transpose coefficients(phase 2)
unpcklps2 xmm4, xmm2 ; xmm4=(02 12 22 32)=data2
unpckhps2 xmm7, xmm2 ; xmm7=(03 13 23 33)=data3
movaps xmm6, xmm1 ; transpose coefficients(phase 2)
unpcklps2 xmm1, xmm3 ; xmm1=(04 14 24 34)=data4
unpckhps2 xmm6, xmm3 ; xmm6=(05 15 25 35)=data5
movaps xmm2, xmm7
movaps xmm3, xmm4
addps xmm7, xmm1 ; xmm7=data3+data4=tmp3
addps xmm4, xmm6 ; xmm4=data2+data5=tmp2
subps xmm2, xmm1 ; xmm2=data3-data4=tmp4
subps xmm3, xmm6 ; xmm3=data2-data5=tmp5
; -- Even part
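; (Per jfdctflt.c:  tmp10 = tmp0 + tmp3;  tmp13 = tmp0 - tmp3;
;  tmp11 = tmp1 + tmp2;  tmp12 = tmp1 - tmp2;
;  data0 = tmp10 + tmp11;  data4 = tmp10 - tmp11;
;  z1 = (tmp12 + tmp13) * c4;  data2 = tmp13 + z1;  data6 = tmp13 - z1.
;  On entry, xmm5 = tmp0, xmm0 = tmp1, xmm4 = tmp2, xmm7 = tmp3.)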
movaps xmm1, xmm5
movaps xmm6, xmm0
subps xmm5, xmm7 ; xmm5=tmp13
subps xmm0, xmm4 ; xmm0=tmp12
addps xmm1, xmm7 ; xmm1=tmp10
addps xmm6, xmm4 ; xmm6=tmp11
addps xmm0, xmm5
mulps xmm0, [GOTOFF(ebx,PD_0_707)] ; xmm0=z1
movaps xmm7, xmm1
movaps xmm4, xmm5
subps xmm1, xmm6 ; xmm1=data4
subps xmm5, xmm0 ; xmm5=data6
addps xmm7, xmm6 ; xmm7=data0
addps xmm4, xmm0 ; xmm4=data2
movaps XMMWORD [XMMBLOCK(0,1,edx,SIZEOF_FAST_FLOAT)], xmm1
movaps XMMWORD [XMMBLOCK(2,1,edx,SIZEOF_FAST_FLOAT)], xmm5
movaps XMMWORD [XMMBLOCK(0,0,edx,SIZEOF_FAST_FLOAT)], xmm7
movaps XMMWORD [XMMBLOCK(2,0,edx,SIZEOF_FAST_FLOAT)], xmm4
; -- Odd part
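; (Per jfdctflt.c:  tmp10 = tmp4 + tmp5;  tmp11 = tmp5 + tmp6;
;  tmp12 = tmp6 + tmp7;  z5 = (tmp10 - tmp12) * c6;
;  z2 = 0.541196100 * tmp10 + z5;  z4 = 1.306562965 * tmp12 + z5;
;  z3 = tmp11 * c4;  z11 = tmp7 + z3;  z13 = tmp7 - z3;
;  data5 = z13 + z2;  data3 = z13 - z2;  data1 = z11 + z4;  data7 = z11 - z4.
;  On entry, xmm2 = tmp4, xmm3 = tmp5, wk(0) = tmp6, wk(1) = tmp7.)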
movaps xmm6, XMMWORD [wk(0)] ; xmm6=tmp6
movaps xmm0, XMMWORD [wk(1)] ; xmm0=tmp7
addps xmm2, xmm3 ; xmm2=tmp10
addps xmm3, xmm6 ; xmm3=tmp11
addps xmm6, xmm0 ; xmm6=tmp12, xmm0=tmp7
mulps xmm3, [GOTOFF(ebx,PD_0_707)] ; xmm3=z3
movaps xmm1, xmm2 ; xmm1=tmp10
subps xmm2, xmm6
mulps xmm2, [GOTOFF(ebx,PD_0_382)] ; xmm2=z5
mulps xmm1, [GOTOFF(ebx,PD_0_541)] ; xmm1=MULTIPLY(tmp10,FIX_0_541196)
mulps xmm6, [GOTOFF(ebx,PD_1_306)] ; xmm6=MULTIPLY(tmp12,FIX_1_306562)
addps xmm1, xmm2 ; xmm1=z2
addps xmm6, xmm2 ; xmm6=z4
movaps xmm5, xmm0
subps xmm0, xmm3 ; xmm0=z13
addps xmm5, xmm3 ; xmm5=z11
movaps xmm7, xmm0
movaps xmm4, xmm5
subps xmm0, xmm1 ; xmm0=data3
subps xmm5, xmm6 ; xmm5=data7
addps xmm7, xmm1 ; xmm7=data5
addps xmm4, xmm6 ; xmm4=data1
movaps XMMWORD [XMMBLOCK(3,0,edx,SIZEOF_FAST_FLOAT)], xmm0
movaps XMMWORD [XMMBLOCK(3,1,edx,SIZEOF_FAST_FLOAT)], xmm5
movaps XMMWORD [XMMBLOCK(1,1,edx,SIZEOF_FAST_FLOAT)], xmm7
movaps XMMWORD [XMMBLOCK(1,0,edx,SIZEOF_FAST_FLOAT)], xmm4
add edx, 4*DCTSIZE*SIZEOF_FAST_FLOAT
dec ecx
jnz near .rowloop
; ---- Pass 2: process columns.
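; (Pass 2 applies the same transpose-and-butterfly scheme to the columns of
;  the intermediate result; the even and odd sections below are identical to
;  those in pass 1.  Each of the DCTSIZE/4 = 2 iterations handles four
;  columns, advancing the data pointer by one xmmword.)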
mov edx, POINTER [data(eax)] ; (FAST_FLOAT *)
mov ecx, DCTSIZE/4
alignx 16, 7
.columnloop:
movaps xmm0, XMMWORD [XMMBLOCK(2,0,edx,SIZEOF_FAST_FLOAT)]
movaps xmm1, XMMWORD [XMMBLOCK(3,0,edx,SIZEOF_FAST_FLOAT)]
movaps xmm2, XMMWORD [XMMBLOCK(6,0,edx,SIZEOF_FAST_FLOAT)]
movaps xmm3, XMMWORD [XMMBLOCK(7,0,edx,SIZEOF_FAST_FLOAT)]
; xmm0=(02 12 22 32), xmm2=(42 52 62 72)
; xmm1=(03 13 23 33), xmm3=(43 53 63 73)
movaps xmm4, xmm0 ; transpose coefficients(phase 1)
unpcklps xmm0, xmm1 ; xmm0=(02 03 12 13)
unpckhps xmm4, xmm1 ; xmm4=(22 23 32 33)
movaps xmm5, xmm2 ; transpose coefficients(phase 1)
unpcklps xmm2, xmm3 ; xmm2=(42 43 52 53)
unpckhps xmm5, xmm3 ; xmm5=(62 63 72 73)
movaps xmm6, XMMWORD [XMMBLOCK(0,0,edx,SIZEOF_FAST_FLOAT)]
movaps xmm7, XMMWORD [XMMBLOCK(1,0,edx,SIZEOF_FAST_FLOAT)]
movaps xmm1, XMMWORD [XMMBLOCK(4,0,edx,SIZEOF_FAST_FLOAT)]
movaps xmm3, XMMWORD [XMMBLOCK(5,0,edx,SIZEOF_FAST_FLOAT)]
; xmm6=(00 10 20 30), xmm1=(40 50 60 70)
; xmm7=(01 11 21 31), xmm3=(41 51 61 71)
movaps XMMWORD [wk(0)], xmm4 ; wk(0)=(22 23 32 33)
movaps XMMWORD [wk(1)], xmm2 ; wk(1)=(42 43 52 53)
movaps xmm4, xmm6 ; transpose coefficients(phase 1)
unpcklps xmm6, xmm7 ; xmm6=(00 01 10 11)
unpckhps xmm4, xmm7 ; xmm4=(20 21 30 31)
movaps xmm2, xmm1 ; transpose coefficients(phase 1)
unpcklps xmm1, xmm3 ; xmm1=(40 41 50 51)
unpckhps xmm2, xmm3 ; xmm2=(60 61 70 71)
movaps xmm7, xmm6 ; transpose coefficients(phase 2)
unpcklps2 xmm6, xmm0 ; xmm6=(00 01 02 03)=data0
unpckhps2 xmm7, xmm0 ; xmm7=(10 11 12 13)=data1
movaps xmm3, xmm2 ; transpose coefficients(phase 2)
unpcklps2 xmm2, xmm5 ; xmm2=(60 61 62 63)=data6
unpckhps2 xmm3, xmm5 ; xmm3=(70 71 72 73)=data7
movaps xmm0, xmm7
movaps xmm5, xmm6
subps xmm7, xmm2 ; xmm7=data1-data6=tmp6
subps xmm6, xmm3 ; xmm6=data0-data7=tmp7
addps xmm0, xmm2 ; xmm0=data1+data6=tmp1
addps xmm5, xmm3 ; xmm5=data0+data7=tmp0
movaps xmm2, XMMWORD [wk(0)] ; xmm2=(22 23 32 33)
movaps xmm3, XMMWORD [wk(1)] ; xmm3=(42 43 52 53)
movaps XMMWORD [wk(0)], xmm7 ; wk(0)=tmp6
movaps XMMWORD [wk(1)], xmm6 ; wk(1)=tmp7
movaps xmm7, xmm4 ; transpose coefficients(phase 2)
unpcklps2 xmm4, xmm2 ; xmm4=(20 21 22 23)=data2
unpckhps2 xmm7, xmm2 ; xmm7=(30 31 32 33)=data3
movaps xmm6, xmm1 ; transpose coefficients(phase 2)
unpcklps2 xmm1, xmm3 ; xmm1=(40 41 42 43)=data4
unpckhps2 xmm6, xmm3 ; xmm6=(50 51 52 53)=data5
movaps xmm2, xmm7
movaps xmm3, xmm4
addps xmm7, xmm1 ; xmm7=data3+data4=tmp3
addps xmm4, xmm6 ; xmm4=data2+data5=tmp2
subps xmm2, xmm1 ; xmm2=data3-data4=tmp4
subps xmm3, xmm6 ; xmm3=data2-data5=tmp5
; -- Even part
movaps xmm1, xmm5
movaps xmm6, xmm0
subps xmm5, xmm7 ; xmm5=tmp13
subps xmm0, xmm4 ; xmm0=tmp12
addps xmm1, xmm7 ; xmm1=tmp10
addps xmm6, xmm4 ; xmm6=tmp11
addps xmm0, xmm5
mulps xmm0, [GOTOFF(ebx,PD_0_707)] ; xmm0=z1
movaps xmm7, xmm1
movaps xmm4, xmm5
subps xmm1, xmm6 ; xmm1=data4
subps xmm5, xmm0 ; xmm5=data6
addps xmm7, xmm6 ; xmm7=data0
addps xmm4, xmm0 ; xmm4=data2
movaps XMMWORD [XMMBLOCK(4,0,edx,SIZEOF_FAST_FLOAT)], xmm1
movaps XMMWORD [XMMBLOCK(6,0,edx,SIZEOF_FAST_FLOAT)], xmm5
movaps XMMWORD [XMMBLOCK(0,0,edx,SIZEOF_FAST_FLOAT)], xmm7
movaps XMMWORD [XMMBLOCK(2,0,edx,SIZEOF_FAST_FLOAT)], xmm4
; -- Odd part
movaps xmm6, XMMWORD [wk(0)] ; xmm6=tmp6
movaps xmm0, XMMWORD [wk(1)] ; xmm0=tmp7
addps xmm2, xmm3 ; xmm2=tmp10
addps xmm3, xmm6 ; xmm3=tmp11
addps xmm6, xmm0 ; xmm6=tmp12, xmm0=tmp7
mulps xmm3, [GOTOFF(ebx,PD_0_707)] ; xmm3=z3
movaps xmm1, xmm2 ; xmm1=tmp10
subps xmm2, xmm6
mulps xmm2, [GOTOFF(ebx,PD_0_382)] ; xmm2=z5
mulps xmm1, [GOTOFF(ebx,PD_0_541)] ; xmm1=MULTIPLY(tmp10,FIX_0_541196)
mulps xmm6, [GOTOFF(ebx,PD_1_306)] ; xmm6=MULTIPLY(tmp12,FIX_1_306562)
addps xmm1, xmm2 ; xmm1=z2
addps xmm6, xmm2 ; xmm6=z4
movaps xmm5, xmm0
subps xmm0, xmm3 ; xmm0=z13
addps xmm5, xmm3 ; xmm5=z11
movaps xmm7, xmm0
movaps xmm4, xmm5
subps xmm0, xmm1 ; xmm0=data3
subps xmm5, xmm6 ; xmm5=data7
addps xmm7, xmm1 ; xmm7=data5
addps xmm4, xmm6 ; xmm4=data1
movaps XMMWORD [XMMBLOCK(3,0,edx,SIZEOF_FAST_FLOAT)], xmm0
movaps XMMWORD [XMMBLOCK(7,0,edx,SIZEOF_FAST_FLOAT)], xmm5
movaps XMMWORD [XMMBLOCK(5,0,edx,SIZEOF_FAST_FLOAT)], xmm7
movaps XMMWORD [XMMBLOCK(1,0,edx,SIZEOF_FAST_FLOAT)], xmm4
add edx, byte 4*SIZEOF_FAST_FLOAT
dec ecx
jnz near .columnloop
; pop edi ; unused
; pop esi ; unused
; pop edx ; need not be preserved
; pop ecx ; need not be preserved
poppic ebx
mov esp, ebp ; esp <- aligned ebp
pop esp ; esp <- original ebp
pop ebp
ret
; For some reason, the OS X linker does not honor the request to align the
; segment unless we do this.
align 32