%include "libavutil/x86/x86util.asm"

SECTION .text

%if ARCH_X86_64 == 0

;-----------------------------------------------------------------------------
; 
; Some functions rely on an aligned stack pointer. This means that they assume
; that esp has the form xxxxxxxCh (i.e. it must end with 0Ch). It seems that
; only GCC is able to align the stack in that way. This macro creates a stub
; which rearranges the parameters on the stack, then calls into the function
; (which requires the stack alignment) and on return corrects the stack
; pointer again and returns to the caller.
;
; +-------------------------+   
; | return address          |   <----- esp
; +-------------------------+
; | argument 1              |
; +-------------------------+
; | argument 2              |
; +-------------------------+
; | argument 3              |
; +-------------------------+
;
; esp is 4-byte aligned, i.e. it may end with 00h, 04h, 08h or 0Ch. In the
; latter case, we have nothing to do and directly call into the wrapped 
; function. In the other cases we copy the arguments to an appropriate place:
;
;
;                                   +-------------------------+   
;                                   | return stub             | <----- esp (xxxCh)
; +-------------------------+       +-------------------------+   
; | return address          |       | argument 1              |
; +-------------------------+       +-------------------------+
; | argument 1              |       | argument 2              |  
; +-------------------------+  -->  +-------------------------+
; | argument 2              |       | argument 3              |
; +-------------------------+       +-------------------------+
; | argument 3              |       | original return addr.   |
; +-------------------------+       +-------------------------+
;
; The return stub corrects the stack pointer again to its original 
; location and jumps back to the original return address.
;
; first argument : the name of the function this stub will call
; second argument: the number of arguments (of the original function)
; STACKALIGNSTUB name, nargs
;
; Emits the body of a stub that re-aligns the stack and tail-calls an
; implementation requiring GCC-style alignment (esp ending in 0Ch on entry,
; i.e. 16-byte aligned before the call instruction).
;
; %1: the name of the function this stub will call (declared external here)
; %2: the number of 4-byte stack arguments of the original function
;
; Only the scratch registers eax, ecx and edx are used, so the stub is
; transparent with respect to all callee-saved registers.
;
; NOTE(review, fixed): the previous version tested the stack alignment with
; "and esp,0fh", which left esp pointing near address 0 until it was reloaded
; from edx a few instructions later. A signal/exception/APC delivered in that
; window (handlers run on the user stack on i386) would have faulted. The
; test now uses ecx as scratch so esp stays valid at all times.
%macro STACKALIGNSTUB 2
cextern %1			; declare the method for which we create a stub as external
; can only use the scratch registers: eax, ecx, edx
; Fast path: esp already ends in 0Ch -> alignment is correct, tail-call.
mov edx,esp
and edx,0fh
cmp edx,0ch
jne .needToAlign
jmp %1				; wrapped function returns directly to our caller
.needToAlign:
mov edx,esp
and edx,0fffffff0h	; edx = esp rounded down to 16; argument 1 goes at [edx]
mov ecx,[esp]		; save the original return address

; Copy the arguments to their new, aligned location: argument i moves from
; [esp+4*i] to [edx+4*i-4]. Since edx <= esp, a destination slot never
; overwrites a source slot that has not been copied yet.
%assign i 1
%rep 16				; hard upper bound; the real count is %2
%if i > %2
	%exitrep
%endif
mov eax,[esp+4*i]
mov [edx+4*i-4],eax
%assign i i+1
%endrep

mov [esp+4*%2],ecx	; store the return address above the relocated arguments

; Pick the return stub that matches the adjustment we are about to make.
; esp mod 16 is 0, 4 or 8 here (the 0Ch case was handled above). ecx is
; free again (its value was stored just above); never invalidate esp.
mov ecx,esp
and ecx,0fh			; ecx = esp mod 16
test ecx,ecx
jne .l1
lea eax,[.retstub0]
jmp .l3
.l1:
cmp ecx,04h
jne .l2
lea eax,[.retstub4]
jmp .l3
.l2:	; ecx must now be 08h
lea eax,[.retstub8]
.l3:
mov esp,edx			; esp ends in 00h here ...
sub esp,4			; ... and in 0Ch here, as the wrapped function expects
mov [esp],eax		; fake return address: one of the stubs below
jmp %1

; Each stub runs after the wrapped function returned (esp == edx at that
; point). It restores esp to original_esp+4 -- exactly what a plain "ret"
; would have left -- and jumps to the original return address saved at
; [original_esp+4*%2].

.retstub0:			; original esp ended in 00h (edx == original esp)
add esp,4
jmp [esp-4+4*%2]

.retstub4:			; original esp ended in 04h (edx == original esp - 4)
add esp,8
jmp [esp-4+4*%2]

.retstub8:			; original esp ended in 08h (edx == original esp - 8)
add esp,12
jmp [esp-4+4*%2]

%endmacro

;-----------------------------------------------------------------------------
; Stub instantiations for h264_deblock.asm / h264_idct.asm (8- and 10-bit).
;
; Each "cglobal alignstubbed_<name>" label is immediately followed by a
; STACKALIGNSTUB body, so calling alignstubbed_<name> re-aligns the stack and
; tail-calls <name>. The second macro argument is the number of 4-byte stack
; arguments <name> takes; it MUST match the C prototype, because the stub
; uses it to relocate both the arguments and the return address.

cglobal alignstubbed_deblock_v_luma_8_sse2			
STACKALIGNSTUB deblock_v_luma_8_sse2,5

cglobal alignstubbed_deblock_h_luma_8_sse2			
STACKALIGNSTUB deblock_h_luma_8_sse2,5

cglobal alignstubbed_deblock_v_luma_intra_8_sse2	
STACKALIGNSTUB deblock_v_luma_intra_8_sse2,4

cglobal alignstubbed_deblock_h_luma_intra_8_sse2	
STACKALIGNSTUB deblock_h_luma_intra_8_sse2,4

cglobal alignstubbed_deblock_v_luma_8_avx			
STACKALIGNSTUB deblock_v_luma_8_avx,5

cglobal alignstubbed_deblock_h_luma_8_avx			
STACKALIGNSTUB deblock_h_luma_8_avx,5

cglobal alignstubbed_deblock_v_luma_intra_8_avx		
STACKALIGNSTUB deblock_v_luma_intra_8_avx,4

cglobal alignstubbed_deblock_h_luma_intra_8_avx		
STACKALIGNSTUB deblock_h_luma_intra_8_avx,4

cglobal alignstubbed_h264_idct8_add_10_sse2			
STACKALIGNSTUB h264_idct8_add_10_sse2,3

cglobal alignstubbed_h264_idct8_add4_10_sse2		
STACKALIGNSTUB h264_idct8_add4_10_sse2,5

cglobal alignstubbed_deblock_v_luma_10_sse2			
STACKALIGNSTUB deblock_v_luma_10_sse2,5

cglobal alignstubbed_deblock_h_luma_10_sse2
STACKALIGNSTUB deblock_h_luma_10_sse2,5

cglobal alignstubbed_deblock_v_luma_intra_10_sse2	
STACKALIGNSTUB deblock_v_luma_intra_10_sse2,4

cglobal alignstubbed_deblock_h_luma_intra_10_sse2	
STACKALIGNSTUB deblock_h_luma_intra_10_sse2,4

cglobal alignstubbed_h264_idct8_add_10_avx			
STACKALIGNSTUB h264_idct8_add_10_avx,3

cglobal alignstubbed_h264_idct8_add4_10_avx			
STACKALIGNSTUB h264_idct8_add4_10_avx,5

cglobal alignstubbed_deblock_v_luma_10_avx			
STACKALIGNSTUB deblock_v_luma_10_avx,5

cglobal alignstubbed_deblock_h_luma_10_avx			
STACKALIGNSTUB deblock_h_luma_10_avx,5

cglobal alignstubbed_deblock_v_luma_intra_10_avx	
STACKALIGNSTUB deblock_v_luma_intra_10_avx,4

cglobal alignstubbed_deblock_h_luma_intra_10_avx	
STACKALIGNSTUB deblock_h_luma_intra_10_avx,4

; Stubs for vp8dsp.asm (somewhat misplaced here...).
; Same pattern as above: the trailing number is the 4-byte stack argument
; count of the wrapped function and MUST match its C prototype.

cglobal alignstubbed_vp8_h_loop_filter16y_mbedge_sse4	
STACKALIGNSTUB vp8_h_loop_filter16y_mbedge_sse4,5

cglobal alignstubbed_vp8_h_loop_filter8uv_mbedge_sse4
STACKALIGNSTUB vp8_h_loop_filter8uv_mbedge_sse4,6

cglobal alignstubbed_vp8_v_loop_filter16y_inner_ssse3
STACKALIGNSTUB vp8_v_loop_filter16y_inner_ssse3,5

cglobal alignstubbed_vp8_h_loop_filter16y_inner_ssse3
STACKALIGNSTUB vp8_h_loop_filter16y_inner_ssse3,5

cglobal alignstubbed_vp8_v_loop_filter8uv_inner_ssse3
STACKALIGNSTUB vp8_v_loop_filter8uv_inner_ssse3,6

cglobal alignstubbed_vp8_h_loop_filter8uv_inner_ssse3
STACKALIGNSTUB vp8_h_loop_filter8uv_inner_ssse3,6

cglobal alignstubbed_vp8_v_loop_filter16y_mbedge_ssse3
STACKALIGNSTUB vp8_v_loop_filter16y_mbedge_ssse3,5

cglobal alignstubbed_vp8_h_loop_filter16y_mbedge_ssse3
STACKALIGNSTUB vp8_h_loop_filter16y_mbedge_ssse3,5

cglobal alignstubbed_vp8_v_loop_filter8uv_mbedge_ssse3
STACKALIGNSTUB vp8_v_loop_filter8uv_mbedge_ssse3,6

cglobal alignstubbed_vp8_h_loop_filter8uv_mbedge_ssse3
STACKALIGNSTUB vp8_h_loop_filter8uv_mbedge_ssse3,6

cglobal alignstubbed_vp8_h_loop_filter16y_inner_sse2
STACKALIGNSTUB vp8_h_loop_filter16y_inner_sse2,5

cglobal alignstubbed_vp8_h_loop_filter8uv_inner_sse2
STACKALIGNSTUB vp8_h_loop_filter8uv_inner_sse2,6

cglobal alignstubbed_vp8_h_loop_filter16y_mbedge_sse2
STACKALIGNSTUB vp8_h_loop_filter16y_mbedge_sse2,5

cglobal alignstubbed_vp8_h_loop_filter8uv_mbedge_sse2
STACKALIGNSTUB vp8_h_loop_filter8uv_mbedge_sse2,6

cglobal alignstubbed_vp8_v_loop_filter16y_inner_sse2
STACKALIGNSTUB vp8_v_loop_filter16y_inner_sse2,5

cglobal alignstubbed_vp8_v_loop_filter8uv_inner_sse2
STACKALIGNSTUB vp8_v_loop_filter8uv_inner_sse2,6

cglobal alignstubbed_vp8_v_loop_filter16y_mbedge_sse2
STACKALIGNSTUB vp8_v_loop_filter16y_mbedge_sse2,5

cglobal alignstubbed_vp8_v_loop_filter8uv_mbedge_sse2
STACKALIGNSTUB vp8_v_loop_filter8uv_mbedge_sse2,6

; Disabled stub. NOTE(review): the wrapped name "idct_xvid_sse2_" (trailing
; underscore) does not match the label "idct_xvid_sse2" above it -- presumably
; a leftover typo; verify the real symbol name before re-enabling.
;cglobal idct_xvid_sse2
;STACKALIGNSTUB idct_xvid_sse2_,1


%endif