;
; Copyright (c) 2011 Mans Rullgard <mans@mansr.com>
;
; This file is part of FFmpeg.
;
; FFmpeg is free software; you can redistribute it and/or
; modify it under the terms of the GNU Lesser General Public
; License as published by the Free Software Foundation; either
; version 2.1 of the License, or (at your option) any later version.
;
; FFmpeg is distributed in the hope that it will be useful,
; but WITHOUT ANY WARRANTY; without even the implied warranty of
; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
; Lesser General Public License for more details.
;
; You should have received a copy of the GNU Lesser General Public
; License along with FFmpeg; if not, write to the Free Software
; Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;

;#include "libavutil/arm/asm.S"
	AREA .text,CODE,THUMB 
		; bflies: butterfly step shared by the transforms below.
		; $d0 = first-half data, $d1 = second-half data (already
		; multiplied by twiddles when called via transform01/transform2),
		; $r0/$r1 = scratch registers (clobbered).
		; vhadd/vhsub are *halving* add/sub, so each butterfly stage
		; scales its output by 1/2 — keeps int16 values from overflowing.
		MACRO
	    bflies          $d0,  $d1,  $r0,  $r1
        vrev64.32       $r0, $d1                ; t5, t6, t1, t2
        vhsub.s16       $r1, $d1, $r0           ; t1-t5, t2-t6, t5-t1, t6-t2
        vhadd.s16       $r0, $d1, $r0           ; t1+t5, t2+t6, t5+t1, t6+t2
        vext.16         $r1, $r1, $r1, #1       ; t2-t6, t5-t1, t6-t2, t1-t5
        vtrn.32         $r0, $r1                ; t1+t5, t2+t6, t2-t6, t5-t1
                                                ; t5,    t6,    t4,    t3
        vhsub.s16       $d1, $d0, $r0
        vhadd.s16       $d0, $d0, $r0
		MEND

		; transform01: twiddle one d-register of second-half data, then
		; butterfly it against $q0.
		; $d3 is multiplied by the coefficient pair $c0/$c1 (Q15 values —
		; the 32-bit products are narrowed with >>15 via vshrn), with
		; vrev32.16 swapping re/im so one vmull+vmlal forms the complex
		; product.  $r0 (d-reg) and $w0/$w1 (q-regs) are scratch/clobbered.
		MACRO
	    transform01     $q0,  $q1,  $d3,  $c0,  $c1,  $r0,  $w0,  $w1
        vrev32.16       $r0, $d3
        vmull.s16       $w0, $d3, $c0
        vmlal.s16       $w0, $r0, $c1
        vshrn.s32       $d3, $w0, #15
        bflies          $q0, $q1, $w0, $w1
		MEND

		; transform2: same as transform01 but twiddles two d-registers
		; ($d1 with $c0/$c1, $d3 with $c2/$c3) before the shared butterfly.
		; $d0/$d2 name the untouched halves only for documentation — the
		; data actually combined is $q0/$q1.  $r0/$r1 (d-regs) and
		; $w0/$w1 (q-regs) are scratch/clobbered.
		MACRO
	    transform2      $d0,  $d1,  $d2,  $d3,  $q0,  $q1,  $c0,  $c1,  $c2,  $c3, \
                        $r0,  $r1,  $w0,  $w1
        vrev32.16       $r0, $d1
        vrev32.16       $r1, $d3
        vmull.s16       $w0, $d1, $c0
        vmlal.s16       $w0, $r0, $c1
        vmull.s16       $w1, $d3, $c2
        vmlal.s16       $w1, $r1, $c3
        vshrn.s32       $d1, $w0, #15
        vshrn.s32       $d3, $w1, #15
        bflies          $q0, $q1, $w0, $w1
		MEND

		; fft4: 4-point FFT over the four complex int16 samples held in
		; $d0:$d1 (results scaled down by the halving adds).
		; $r0/$r1 are scratch (clobbered).  The vmov.i64/vbit pair builds
		; the lane mask 0xffff00000000 and merges one 16-bit lane of $r1
		; into $r0; also works with q-registers (see fft16_neon).
		MACRO
	    fft4            $d0,  $d1,  $r0,  $r1
        vhsub.s16       $r0, $d0, $d1           ; t3, t4, t8, t7
        vhsub.s16       $r1, $d1, $d0
        vhadd.s16       $d0, $d0, $d1           ; t1, t2, t6, t5
        vmov.i64        $d1, #0xffff00000000
        vbit            $r0, $r1, $d1
        vrev64.16       $r1, $r0                ; t7, t8, t4, t3
        vtrn.32         $r0, $r1                ; t3, t4, t7, t8
        vtrn.32         $d0, $r0                ; t1, t2, t3, t4, t6, t5, t8, t7
        vhsub.s16       $d1, $d0, $r0           ; r2, i2, r3, i1
        vhadd.s16       $d0, $d0, $r0           ; r0, i0, r1, i3
		MEND

		; fft8: 8-point FFT.  fft4 on the first half ($d0:$d1), halving
		; add/sub on the second half ($d2:$d3), then transform01 applies
		; the $c0/$c1 twiddle (sqrt(1/2) splat + coefs row at the call
		; sites) and the final butterflies.
		; Scratch/clobbered: $r0, $r1, $w0, $w1.
		MACRO
	    fft8            $d0,  $d1,  $d2,  $d3,  $q0,  $q1,  $c0,  $c1,  $r0,  $r1, $w0, $w1
        fft4            $d0, $d1, $r0, $r1
        vtrn.32         $d0, $d1                ; z0, z2, z1, z3
        vhadd.s16       $r0, $d2, $d3           ; t1, t2, t3, t4
        vhsub.s16       $d3, $d2, $d3           ; z5, z7
        vmov            $d2, $r0
        transform01     $q0, $q1, $d3, $c0, $c1, $r0, $w0, $w1
		MEND

;-----------------------------------------------------------------------
; fft4_neon
; In:  r0 = pointer to 16 bytes of data (4 complex int16 re/im pairs —
;      layout assumed from the vld1.16 usage; transformed in place)
; Clobbers: d0-d3 (caller-saved under the AAPCS)
;-----------------------------------------------------------------------
fft4_neon	PROC
        vld1.16         {d0-d1},  [r0]
        fft4            d0,  d1,  d2,  d3
        vst1.16         {d0-d1},  [r0]
        bx              lr
		ENDP

;-----------------------------------------------------------------------
; fft8_neon
; In:  r0 = pointer to 32 bytes of data, 128-bit aligned (@128);
;      transformed in place.
; Loads coefs row 0 into d30 and splats F_SQRT1_2 (coefs[0]) into d31
; as the fft8 twiddle pair.  Clobbers r1, d0-d3, q8-q10, d30-d31.
;-----------------------------------------------------------------------
fft8_neon	PROC
        vld1.16         {d0-d3},  [r0@128]
        ldr          r1,  =coefs
        vld1.16         {d30},    [r1@64]
        vdup.16         d31, d30[0]
        fft8            d0,  d1,  d2,  d3,  q0,  q1,  d31, d30, d20, d21, q8, q9
        vtrn.32         d0,  d1                 ; restore natural output order
        vtrn.32         d2,  d3
        vst1.16         {d0-d3},  [r0@128]
        bx              lr
		ENDP

;-----------------------------------------------------------------------
; fft16_neon
; In:  r0 = pointer to 64 bytes of data, 128-bit aligned; in place.
; fft8 on the first half, fft4 on the second half (as two 4-point
; transforms via q-registers), then two combining passes using all
; three coefs rows.  Clobbers r1, d0-d7, q8-q10, q13-q15.
;-----------------------------------------------------------------------
fft16_neon	PROC
        vld1.16         {d0-d3},  [r0@128]!
        vld1.16         {d4-d7},  [r0@128]
        ldr          r1,  =coefs
        sub             r0,  r0,  #32           ; rewind the post-incremented load
        vld1.16         {d28-d31},[r1@128]
        vdup.16         d31, d28[0]             ; d31 = F_SQRT1_2 splat (row 4 in d31 unused)
        fft8            d0,  d1,  d2,  d3,  q0,  q1,  d31, d28, d20, d21, q8, q9
        vswp            d5,  d6
        fft4            q2,  q3,  q8,  q9
        vswp            d5,  d6
        vtrn.32         q0,  q1             ; z0, z4, z2, z6, z1, z5, z3, z7
        vtrn.32         q2,  q3             ; z8, z12,z10,z14,z9, z13,z11,z15
        vswp            d1,  d2
        vdup.16         d31, d28[0]             ; re-splat: d31 was clobbered inside fft8
        transform01     q0,  q2,  d5,  d31, d28, d20, q8, q9
        vdup.16         d26, d29[0]             ; F_COS_16_1 splat
        vdup.16         d27, d30[0]             ; F_COS_16_3 splat
        transform2      d2,  d6,  d3,  d7,  q1,  q3,  d26, d30, d27, d29, \
                        d20, d21, q8,  q9
        vtrn.32         q0,  q1
        vtrn.32         q2,  q3
        vst1.16         {d0-d3},  [r0@128]!
        vst1.16         {d4-d7},  [r0@128]
        bx              lr
		ENDP

;-----------------------------------------------------------------------
; fft_pass_neon: combining pass used by the def_fft-generated sizes.
; In:  r0 = data pointer (128-bit aligned rows)
;      r1 = twiddle table (ff_cos_<n>_fixed — see def_fft), read forward
;      r3 = mirrored position in the same table, read backward (lr = -8)
;      r2 = column-pair count (def_fft passes n/8); two columns per
;           iteration (subs r2, #2)
; r12 = row stride in bytes (r2*8).  The first iteration branches to
; label 2 so the z[0] column uses transform01 (sqrt(1/2) twiddle from
; coefs+24) instead of the general transform2.
; Clobbers r0-r4, r12, lr, d0-d7, q8-q10, q12-q15.
;-----------------------------------------------------------------------
fft_pass_neon	PROC
        push            {r4,lr}
        ldr          lr,  =coefs+24              ; row 4: 1, -1, -1, 1 (sign mask)
        vld1.16         {d30},    [lr@64]
        lsl             r12, r2,  #3             ; row stride = r2 * 8 bytes
        vmov            d31, d30
        add             r3,  r1,  r2,  lsl #2    ; r3 = end of twiddle table
        mov             lr,  #-8                 ; backward step for mirrored loads
        sub             r3,  r3,  #2
        mov             r4,  r0
        vld1.16         {d27[]},  [r3@16]
        sub             r3,  r3,  #6
        vld1.16         {q0},     [r4@128], r12
        vld1.16         {q1},     [r4@128], r12
        vld1.16         {q2},     [r4@128], r12
        vld1.16         {q3},     [r4@128], r12
        vld1.16         {d28},    [r1@64]!
        vld1.16         {d29},    [r3@64], lr
        vswp            d1,  d2
        vswp            d5,  d6
        vtrn.32         d0,  d1
        vtrn.32         d4,  d5
        vdup.16         d25, d28[1]
        vmul.s16        d27, d27, d31            ; apply sign pattern to twiddle
        transform01     q0,  q2,  d5,  d25, d27, d20, q8,  q9
        b               %f2                      ; first iteration: skip transform2 below
1
        mov             r4,  r0
        vdup.16         d26, d29[0]
        vld1.16         {q0},     [r4@128], r12
        vld1.16         {q1},     [r4@128], r12
        vld1.16         {q2},     [r4@128], r12
        vld1.16         {q3},     [r4@128], r12
        vld1.16         {d28},    [r1@64]!
        vld1.16         {d29},    [r3@64], lr
        vswp            d1,  d2
        vswp            d5,  d6
        vtrn.32         d0,  d1
        vtrn.32         d4,  d5
        vdup.16         d24, d28[0]
        vdup.16         d25, d28[1]
        vdup.16         d27, d29[3]
        vmul.s16        q13, q13, q15            ; apply sign pattern to twiddles
        transform2      d0,  d4,  d1,  d5,  q0,  q2,  d24, d26, d25, d27, \
                        d16, d17, q9,  q10
2
        vtrn.32         d2,  d3
        vtrn.32         d6,  d7
        vdup.16         d24, d28[2]
        vdup.16         d26, d29[2]
        vdup.16         d25, d28[3]
        vdup.16         d27, d29[1]
        vmul.s16        q13, q13, q15
        transform2      d2,  d6,  d3,  d7,  q1,  q3,  d24, d26, d25, d27, \
                        d16, d17, q9,  q10
        vtrn.32         d0,  d1                  ; restore memory layout before store
        vtrn.32         d2,  d3
        vtrn.32         d4,  d5
        vtrn.32         d6,  d7
        vswp            d1,  d2
        vswp            d5,  d6
        mov             r4,  r0
        vst1.16         {q0},     [r4@128], r12
        vst1.16         {q1},     [r4@128], r12
        vst1.16         {q2},     [r4@128], r12
        vst1.16         {q3},     [r4@128], r12
        add             r0,  r0,  #16            ; advance two columns (4 complex)
        subs            r2,  r2,  #2
        bgt             %b1
        pop             {r4,pc}
		ENDP

; Q15 fixed-point twiddle constants (value = round(cos * 2^15)):
F_SQRT1_2   EQU 23170                   ; sqrt(1/2)  = cos(pi/4)
F_COS_16_1  EQU 30274                   ; cos(1*pi/8)
F_COS_16_3  EQU 12540                   ; cos(3*pi/8)

; Four 8-byte rows, loaded as d-registers (rows 1-3 are twiddle pairs,
; row 4 is a sign pattern used by fft_pass_neon).
; NOTE(review): DCI emits halfwords here because this AREA is THUMB
; code; DCW would state the halfword-data intent explicitly — confirm
; the assembler lays these out as 16-bit values.
coefs DCI F_SQRT1_2, -F_SQRT1_2, -F_SQRT1_2,  F_SQRT1_2
	  DCI F_COS_16_1,-F_COS_16_1,-F_COS_16_1, F_COS_16_1
	  DCI F_COS_16_3,-F_COS_16_3,-F_COS_16_3, F_COS_16_3
	  DCI  1,         -1,         -1,          1
	  
		; def_fft n, n2, n4: generates fft<n>_neon by radix decomposition:
		; fft(n/2) on the first half, fft(n/4) on each quarter of the
		; second half, then a tail-call (b after pop {r4,lr}) into
		; fft_pass_neon with the external ff_cos_<n>_fixed twiddle table
		; and r2 = n/8.  Offsets are bytes: 4 bytes per complex sample,
		; so #$n4*2*4 is the start of the second half.
		MACRO
	    def_fft $n, $n2, $n4
		EXTERN ff_cos_$n._fixed
fft$n._neon	PROC
        push            {r4, lr}
        mov             r4,  r0                 ; preserve data pointer across calls
        bl              fft$n2._neon
        add             r0,  r4,  #$n4*2*4
        bl              fft$n4._neon
        add             r0,  r4,  #$n4*3*4
        bl              fft$n4._neon
        mov             r0,  r4
        pop             {r4, lr}
        ldr             r1,  =ff_cos_$n._fixed
        mov             r2,  #$n4/2
        b               fft_pass_neon           ; tail call; returns to our caller
		ENDP
		MEND

        ; Instantiate fft32_neon .. fft65536_neon (args: n, n/2, n/4).
        def_fft    32,    16,     8
        def_fft    64,    32,    16
        def_fft   128,    64,    32
        def_fft   256,   128,    64
        def_fft   512,   256,   128
        def_fft  1024,   512,   256
        def_fft  2048,  1024,   512
        def_fft  4096,  2048,  1024
        def_fft  8192,  4096,  2048
        def_fft 16384,  8192,  4096
        def_fft 32768, 16384,  8192
        def_fft 65536, 32768, 16384

		
		EXPORT ff_fft_fixed_calc_neon
;-----------------------------------------------------------------------
; ff_fft_fixed_calc_neon(context, data)
; In:  r0 = context pointer — the first 32-bit field is used as the
;      transform size log2 (presumably FFTContext.nbits; confirm
;      against the struct layout), r1 = data pointer.
; Tail-dispatches to fft_fixed_tab_neon[nbits - 2] with r0 = data.
;-----------------------------------------------------------------------
ff_fft_fixed_calc_neon	PROC
        ldr             r2,  [r0]               ; r2 = nbits
        sub             r2,  r2,  #2            ; table starts at nbits == 2 (fft4)
        ldr          r3,  =fft_fixed_tab_neon
        ldr             r3,  [r3, r2, lsl #2]
        mov             r0,  r1
        bx              r3
		ENDP

		AREA .rodata, DATA, ARM

; Dispatch table indexed by (nbits - 2): entry i handles a
; 2^(i+2)-point transform.
; NOTE(review): entries reference Thumb PROC labels and are reached via
; bx r3 — the assembler/linker should set bit 0 on these addresses to
; keep the branch in Thumb state; confirm with the toolchain.
fft_fixed_tab_neon	DCD fft4_neon
        DCD fft8_neon
        DCD fft16_neon
        DCD fft32_neon
        DCD fft64_neon
        DCD fft128_neon
        DCD fft256_neon
        DCD fft512_neon
        DCD fft1024_neon
        DCD fft2048_neon
        DCD fft4096_neon
        DCD fft8192_neon
        DCD fft16384_neon
        DCD fft32768_neon
        DCD fft65536_neon

		END
