# 1 "simple_idct_armv6.S"
# 1 "<built-in>"
# 1 "<command line>"
# 1 "simple_idct_armv6.S"
# 41 "simple_idct_armv6.S"
        .text
        .align
@ Literal pool of packed 16-bit IDCT cosine coefficients.  Each word
@ holds two weights (low halfword | high halfword << 16); the name wNM
@ packs weights N and M, and w42n is the negated w42 pair.  They are
@ loaded PC-relative and fed to the SMLAD/SMLSD/SMUAD dual 16-bit
@ multiply-accumulate instructions in the macros below.
w13: .long (22725 | (19266 << 16))
w26: .long (21407 | (8867 << 16))
w42: .long (16383 | (21407 << 16))
w42n: .long (-16383&0xffff | (-21407 << 16))
w46: .long (16383 | (8867 << 16))
w57: .long (12873 | (4520 << 16))
@ NOTE(review): w26 is not referenced in this chunk - may be used by code
@ outside the visible range; confirm before removing.
# 60 "simple_idct_armv6.S"
        @ One 8-point 1-D IDCT pass over eight 16-bit coefficients, built
        @ on the ARMv6 dual 16-bit multiply-accumulate instructions.
        @
        @ Caller contract (all three callers in this file set this up):
        @   a1 -> eight int16 inputs, packed two per word
        @   a3 =  first input pair   ([a1])
        @   a4 =  third input pair   ([a1, #8])
        @   ip =  w42 coefficient pair
        @ \shift selects the rounding bias 1 << (\shift - 1), folded into
        @ the even-part accumulators via a2.
        @
        @ Outputs: v1-v4 = even-part sums, v5-v7/fp = odd-part sums, to
        @ be combined by one of the idct_finish* macros.
        @ Clobbers: a2-a4, ip, lr, v1-v7, fp.
        .macro idct_row shift
        ldr lr, [pc, #(w46-.-8)]        @ lr = w46
        mov a2, #(1<<(\shift-1))        @ rounding bias
        smlad v1, a3, ip, a2            @ even part: pair0 . w42 + bias
        smlsd v4, a3, ip, a2            @ even part: pair0 diff-dot w42 + bias
        ldr ip, [pc, #(w13-.-8)]        @ ip = w13
        ldr v7, [pc, #(w57-.-8)]        @ v7 = w57
        smlad v2, a3, lr, a2            @ even part: pair0 . w46 + bias
        smlsd v3, a3, lr, a2

        smuad v5, a4, ip                @ odd part: pair2 . w13
        smusdx fp, a4, v7               @ odd part: cross terms with w57
        ldr lr, [a1, #12]               @ lr = fourth input pair
        pkhtb a3, ip, v7, asr #16       @ a3 = repacked (w57.hi | w13.hi<<16)
        pkhbt a2, ip, v7, lsl #16       @ a2 = repacked (w13.lo | w57.lo<<16)
        smusdx v6, a3, a4
        smlad v5, lr, v7, v5
        smusdx v7, a4, a2

        ldr a4, [pc, #(w42n-.-8)]       @ a4 = negated w42 pair
        smlad v7, lr, a3, v7
        ldr a3, [a1, #4]                @ a3 = second input pair
        smlsdx fp, lr, ip, fp
        ldr ip, [pc, #(w46-.-8)]        @ reload w46 (lr was repurposed)
        smlad v6, lr, a2, v6

        smlad v2, a3, a4, v2            @ fold pair1 into the even part
        smlsd v3, a3, a4, v3
        smlad v1, a3, ip, v1
        smlsd v4, a3, ip, v4
        .endm
# 101 "simple_idct_armv6.S"
        @ Reduced idct_row for the case where the second and fourth input
        @ pairs ([a1, #4] and [a1, #12]) are known to be zero (the caller
        @ checks this before branching here): only the two pairs already
        @ in registers (a3, a4) contribute to the dot products.
        @ Same caller contract, outputs and clobbers as idct_row, except
        @ that a1 is never dereferenced.
        .macro idct_row4 shift
        ldr lr, [pc, #(w46-.-8)]        @ lr = w46
        ldr v7, [pc, #(w57-.-8)]        @ v7 = w57
        mov a2, #(1<<(\shift-1))        @ rounding bias
        smlad v1, a3, ip, a2            @ even part: pair0 . w42 + bias
        smlsd v4, a3, ip, a2
        ldr ip, [pc, #(w13-.-8)]        @ ip = w13
        smlad v2, a3, lr, a2            @ even part: pair0 . w46 + bias
        smlsd v3, a3, lr, a2
        smusdx fp, a4, v7               @ odd part: pair2 only
        smuad v5, a4, ip
        pkhtb a3, ip, v7, asr #16       @ a3 = repacked (w57.hi | w13.hi<<16)
        pkhbt a2, ip, v7, lsl #16       @ a2 = repacked (w13.lo | w57.lo<<16)
        smusdx v6, a3, a4
        smusdx v7, a4, a2
        .endm






        @ Final butterfly: combine the even-part sums (v1-v4) with the
        @ odd-part sums (v5-v7, fp) into the eight raw outputs, with no
        @ shift or saturation (the caller descales with asr #20 itself).
        @ Results: ip/lr = v1 +/- v5, v5/v1 = v2 +/- v6,
        @          v2/v6 = v3 +/- v7, v3/v7 = v4 +/- fp.
        .macro idct_finish
        add ip, v1, v5
        sub lr, v1, v5
        sub v1, v2, v6
        add v5, v2, v6
        add v2, v3, v7
        sub v6, v3, v7
        add v3, v4, fp
        sub v7, v4, fp
        .endm






        @ Final butterfly with descale: combine the even-part sums
        @ (v1-v4) with the odd-part sums (v5-v7, fp) and arithmetic-shift
        @ each result right by \shift.
        @ Results: v1/v5 = (v1 +/- v5) >> s, v2/v6 = (v2 -/+ v6) >> s,
        @          v3/v7 = (v3 +/- v7) >> s, v4/fp = (v4 +/- fp) >> s.
        @ Clobbers: a3, a4.
        .macro idct_finish_shift shift
        add a4, v1, v5
        sub a3, v1, v5
        mov v1, a4, asr #\shift
        mov v5, a3, asr #\shift

        sub a4, v2, v6
        add a3, v2, v6
        mov v2, a4, asr #\shift
        mov v6, a3, asr #\shift

        add a4, v3, v7
        sub a3, v3, v7
        mov v3, a4, asr #\shift
        mov v7, a3, asr #\shift

        add a4, v4, fp
        sub a3, v4, fp
        mov v4, a4, asr #\shift
        mov fp, a3, asr #\shift
        .endm






        @ Final butterfly with descale and clamp: same combination as
        @ idct_finish_shift, but each shifted result is saturated to the
        @ unsigned 8-bit pixel range [0, 255] with USAT.
        @ Results in v1/v5, v2/v6, v3/v7, v4/fp.  Clobbers: a4, ip.
        .macro idct_finish_shift_sat shift
        add a4, v1, v5
        sub ip, v1, v5
        usat v1, #8, a4, asr #\shift
        usat v5, #8, ip, asr #\shift

        sub a4, v2, v6
        add ip, v2, v6
        usat v2, #8, a4, asr #\shift
        usat v6, #8, ip, asr #\shift

        add a4, v3, v7
        sub ip, v3, v7
        usat v3, #8, a4, asr #\shift
        usat v7, #8, ip, asr #\shift

        add a4, v4, fp
        sub ip, v4, fp
        usat v4, #8, a4, asr #\shift
        usat fp, #8, ip, asr #\shift
        .endm






        .align
        .func idct_row_armv6
@ Horizontal IDCT pass over one row of 8 coefficients.
@ In:  a1 -> row (8 x int16), a2 -> output halfword.
@ Out: eight int16 results stored transposed, 16 bytes (one block row)
@      apart, so each row pass fills one column of the temp block.
@ Fast paths:
@   - everything but the DC (low half of pair0) zero -> output DC << 3
@     replicated to all eight positions
@   - pairs 1 and 3 both zero -> cheaper idct_row4
@ Clobbers a3, a4, ip, lr, v1-v7, fp; preserves a1, a2.
idct_row_armv6:
        str lr, [sp, #-4]!              @ save return address

        ldr lr, [a1, #12]               @ pair3
        ldr ip, [a1, #4]                @ pair1
        ldr a4, [a1, #8]                @ pair2
        ldr a3, [a1]                    @ pair0 (DC in low halfword)
        orrs lr, lr, ip                 @ lr = pair1 | pair3; Z if both zero
        cmpeq lr, a4                    @ ...and pair2 zero?
        cmpeq lr, a3, lsr #16           @ ...and high half of pair0 zero?
        beq 1f                          @ yes: DC-only fast path
        str a2, [sp, #-4]!              @ spill output pointer
        ldr ip, [pc, #(w42-.-8)]        @ ip = w42 (idct_row* input)
        cmp lr, #0
        beq 2f                          @ pairs 1 and 3 zero: short form

        idct_row 11
        b 3f

2: idct_row4 11

3: ldr a2, [sp], #4
        idct_finish_shift 11

        strh v1, [a2]                   @ transposed stores: 16 B apart
        strh v2, [a2, #(16*2)]
        strh v3, [a2, #(16*4)]
        strh v4, [a2, #(16*6)]
        strh fp, [a2, #(16*1)]
        strh v7, [a2, #(16*3)]
        strh v6, [a2, #(16*5)]
        strh v5, [a2, #(16*7)]

        ldr pc, [sp], #4                @ return (pop saved lr into pc)

1: mov a3, a3, lsl #3                   @ DC-only: every output = DC << 3
        strh a3, [a2]
        strh a3, [a2, #(16*2)]
        strh a3, [a2, #(16*4)]
        strh a3, [a2, #(16*6)]
        strh a3, [a2, #(16*1)]
        strh a3, [a2, #(16*3)]
        strh a3, [a2, #(16*5)]
        strh a3, [a2, #(16*7)]
        ldr pc, [sp], #4
        .endfunc






        .align
        .func idct_col_armv6
@ Vertical IDCT pass over one column (read back as a contiguous row of
@ the transposed temp block written by idct_row_armv6).
@ In:  a1 -> 8 x int16 inputs, a2 -> output halfword.
@ Out: eight results, descaled by >> 20, stored 16 bytes apart (down one
@      column of the 8x8 int16 output block).
@ Clobbers a3, a4, ip, lr, v1-v7, fp.
idct_col_armv6:
        stmfd sp!, {a2, lr}             @ spill output ptr + return addr

        ldr a3, [a1]                    @ pair0
        ldr ip, [pc, #(w42-.-8)]        @ ip = w42 (idct_row input)
        ldr a4, [a1, #8]                @ pair2
        idct_row 20
        ldr a2, [sp], #4                @ restore output pointer
        idct_finish_shift 20

        strh v1, [a2]
        strh v2, [a2, #(16*1)]
        strh v3, [a2, #(16*2)]
        strh v4, [a2, #(16*3)]
        strh fp, [a2, #(16*4)]
        strh v7, [a2, #(16*5)]
        strh v6, [a2, #(16*6)]
        strh v5, [a2, #(16*7)]

        ldr pc, [sp], #4                @ return (pop saved lr into pc)
        .endfunc







        .align
        .func idct_col_put_armv6
@ Vertical IDCT pass that writes the result out as pixels.
@ In:  a1 -> 8 x int16 inputs, a2 -> destination byte, a3 = line stride.
@ Out: eight bytes, descaled by >> 20 and saturated to [0, 255], written
@      one per line down the column.  The final sub rewinds a2 to its
@      entry value (the 8 post-incremented stores advanced it 8 lines).
@ Clobbers a4, ip, lr, v1-v7, fp.
idct_col_put_armv6:
        stmfd sp!, {a2, a3, lr}         @ spill dest, stride, return addr

        ldr a3, [a1]                    @ pair0
        ldr ip, [pc, #(w42-.-8)]        @ ip = w42 (idct_row input)
        ldr a4, [a1, #8]                @ pair2
        idct_row 20
        ldmfd sp!, {a2, a3}             @ a2 = dest, a3 = stride
        idct_finish_shift_sat 20

        strb v1, [a2], a3               @ one byte per line, post-inc
        strb v2, [a2], a3
        strb v3, [a2], a3
        strb v4, [a2], a3
        strb fp, [a2], a3
        strb v7, [a2], a3
        strb v6, [a2], a3
        strb v5, [a2], a3

        sub a2, a2, a3, lsl #3          @ rewind a2 to column start

        ldr pc, [sp], #4                @ return (pop saved lr into pc)
        .endfunc







        .align
        .func idct_col_add_armv6
@ Vertical IDCT pass that adds the result to the existing pixels.
@ In:  a1 -> 8 x int16 inputs, a2 -> destination byte, a3 = line stride.
@ Each raw sum from idct_finish is descaled by asr #20, added to the
@ destination byte on the corresponding line, saturated to [0, 255]
@ and stored back.  Pixel loads, adds and stores are interleaved by
@ hand for scheduling; row indices below refer to lines 0..7 of the
@ column.  a2 is rewound to its entry value before return.
@ Clobbers a4, ip, lr, v1-v7, fp.
idct_col_add_armv6:
        stmfd sp!, {a2, a3, lr}         @ spill dest, stride, return addr

        ldr a3, [a1]                    @ pair0
        ldr ip, [pc, #(w42-.-8)]        @ ip = w42 (idct_row input)
        ldr a4, [a1, #8]                @ pair2
        idct_row 20
        ldmfd sp!, {a2, a3}             @ a2 = dest, a3 = stride
        idct_finish                     @ raw sums in ip, lr, v1-v7

        ldrb a4, [a2]                   @ pixel row 0
        ldrb v4, [a2, a3]               @ pixel row 1
        ldrb fp, [a2, a3, lsl #2]       @ NOTE(review): this fp is
                                        @ overwritten below before any use
                                        @ - prefetch or dead load? confirm
        add ip, a4, ip, asr #20         @ row 0: pixel + (sum >> 20)
        usat ip, #8, ip                 @ clamp to [0, 255]
        add v1, v4, v1, asr #20         @ row 1
        strb ip, [a2], a3               @ store row 0, advance to row 1
        ldrb ip, [a2, a3]               @ pixel row 2
        usat v1, #8, v1
        ldrb fp, [a2, a3, lsl #2]       @ pixel row 5
        add v2, ip, v2, asr #20         @ row 2
        usat v2, #8, v2
        strb v1, [a2], a3               @ store row 1, advance to row 2
        ldrb a4, [a2, a3]               @ pixel row 3
        ldrb ip, [a2, a3, lsl #2]       @ pixel row 6
        strb v2, [a2], a3               @ store row 2, advance to row 3
        ldrb v4, [a2, a3]               @ pixel row 4
        ldrb v1, [a2, a3, lsl #2]       @ pixel row 7
        add v3, a4, v3, asr #20         @ row 3
        usat v3, #8, v3
        add v7, v4, v7, asr #20         @ row 4
        usat v7, #8, v7
        add v6, fp, v6, asr #20         @ row 5
        usat v6, #8, v6
        add v5, ip, v5, asr #20         @ row 6
        usat v5, #8, v5
        add lr, v1, lr, asr #20         @ row 7
        usat lr, #8, lr
        strb v3, [a2], a3               @ store rows 3..7
        strb v7, [a2], a3
        strb v6, [a2], a3
        strb v5, [a2], a3
        strb lr, [a2], a3

        sub a2, a2, a3, lsl #3          @ rewind a2 to column start

        ldr pc, [sp], #4                @ return (pop saved lr into pc)
        .endfunc






        @ Apply \func to all eight rows (or columns) of the 8x8 block.
        @ a1 advances by two block rows (16*2 bytes) per call, visiting
        @ rows 0, 2, 4, 6 and then - after the mid-point rewind of five
        @ rows - rows 1, 3, 5, 7.  a2 advances by \width bytes per call
        @ and ends 7*\width past its entry value; a1 is rewound back to
        @ its entry value by the final sub.
        .macro idct_rows func width
        bl \func
        add a1, a1, #(16*2)
        add a2, a2, #\width
        bl \func
        add a1, a1, #(16*2)
        add a2, a2, #\width
        bl \func
        add a1, a1, #(16*2)
        add a2, a2, #\width
        bl \func
        sub a1, a1, #(16*5)             @ back from row 6 to row 1
        add a2, a2, #\width
        bl \func
        add a1, a1, #(16*2)
        add a2, a2, #\width
        bl \func
        add a1, a1, #(16*2)
        add a2, a2, #\width
        bl \func
        add a1, a1, #(16*2)
        add a2, a2, #\width
        bl \func

        sub a1, a1, #(16*7)             @ restore a1 to block start
        .endm

        .align
        .global ff_simple_idct_armv6
        .func ff_simple_idct_armv6

@ Apparent C signature: void ff_simple_idct_armv6(int16_t *data)
@ In-place 2-D inverse DCT of an 8x8 block of int16 coefficients.
@ a1 = data.  The row pass writes a transposed copy into a 128-byte
@ stack buffer; the column pass reads it back and stores the final
@ values over the input block.
ff_simple_idct_armv6:
        stmfd sp!, {v1, v2, v3, v4, v5, v6, v7, fp, lr}
        sub sp, sp, #128                @ 8x8 int16 temp block

        mov a2, sp                      @ rows: data -> temp (transposed)
        idct_rows idct_row_armv6, 2
        mov a2, a1                      @ cols: temp -> data
        mov a1, sp
        idct_rows idct_col_armv6, 2

        add sp, sp, #128
        ldmfd sp!, {v1, v2, v3, v4, v5, v6, v7, fp, pc}
        .endfunc

        .align
        .global ff_simple_idct_add_armv6
        .func ff_simple_idct_add_armv6

@ Apparent C signature:
@   void ff_simple_idct_add_armv6(uint8_t *dest, int line_size,
@                                 int16_t *block)
@ IDCT the 8x8 block and add the result to dest with unsigned 8-bit
@ saturation.  dest (a1) and stride (a2) are stashed on the stack
@ across the row pass and reloaded for the column pass.
ff_simple_idct_add_armv6:
        stmfd sp!, {a1, a2, v1, v2, v3, v4, v5, v6, v7, fp, lr}
        sub sp, sp, #128                @ 8x8 int16 temp block

        mov a1, a3                      @ a1 = coefficient block
        mov a2, sp                      @ rows: block -> temp (transposed)
        idct_rows idct_row_armv6, 2
        mov a1, sp                      @ cols: temp -> pixels
        ldr a2, [sp, #128]              @ a2 = saved dest
        ldr a3, [sp, #(128+4)]          @ a3 = saved stride
        idct_rows idct_col_add_armv6, 1

        add sp, sp, #(128+8)            @ drop temp + saved dest/stride
        ldmfd sp!, {v1, v2, v3, v4, v5, v6, v7, fp, pc}
        .endfunc

        .align
        .global ff_simple_idct_put_armv6
        .func ff_simple_idct_put_armv6

@ Apparent C signature:
@   void ff_simple_idct_put_armv6(uint8_t *dest, int line_size,
@                                 int16_t *block)
@ IDCT the 8x8 block and overwrite dest with the saturated (0..255)
@ result.  dest (a1) and stride (a2) are stashed on the stack across
@ the row pass and reloaded for the column pass.
ff_simple_idct_put_armv6:
        stmfd sp!, {a1, a2, v1, v2, v3, v4, v5, v6, v7, fp, lr}
        sub sp, sp, #128                @ 8x8 int16 temp block

        mov a1, a3                      @ a1 = coefficient block
        mov a2, sp                      @ rows: block -> temp (transposed)
        idct_rows idct_row_armv6, 2
        mov a1, sp                      @ cols: temp -> pixels
        ldr a2, [sp, #128]              @ a2 = saved dest
        ldr a3, [sp, #(128+4)]          @ a3 = saved stride
        idct_rows idct_col_put_armv6, 1

        add sp, sp, #(128+8)            @ drop temp + saved dest/stride
        ldmfd sp!, {v1, v2, v3, v4, v5, v6, v7, fp, pc}
        .endfunc
