/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2018-2021.
 * Description: support zImage compression/decompression on arm64
 * Author: xiaowei
 * Create: 2018-10-05
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <asm/assembler.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/cache.h>
#include <asm/cputype.h>
#include <asm/memory.h>
#include <asm/thread_info.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/virt.h>

#ifdef CONFIG_RTOS_HAL_SELFDECOMPRESS_MAP_PHYS_ADDR_ABOVE_4G
#include "peripheral_virt_addr.h"
#endif

#include "efi-header.S"
/*
 * read_ctr - read CTR_EL0.
 *
 * Redefined here (.purgem removes the version pulled in from
 * <asm/assembler.h>) as a plain CTR_EL0 read with no runtime patching,
 * since no alternatives framework exists in the decompressor.  The nop
 * keeps the macro two instructions long like the original.
 */
	.purgem read_ctr
        .macro  read_ctr, reg
        mrs     \reg, ctr_el0                   // read CTR
        nop                                     // placeholder, keeps two-insn layout
        .endm


	/* Byte-swap \tmp on big-endian builds; expands to nothing on LE. */
	.macro	swap tmp
	CPU_BE(rev	\tmp, \tmp)
	.endm

	/*
	 * Emit the character in w22 to the MMIO register addressed by x23,
	 * endian-corrected first.
	 * NOTE(review): assumes the caller loaded w22/x23 — confirm at call sites.
	 */
	.macro	print
	swap     w22
	str       w22, [x23]
	.endm

/* Build one MAIR_EL1 field: attribute byte <attr> for memory-type index <mt>. */
#define MAIR(attr, mt)  ((attr) << ((mt) * 8))

/* 4K translation granule for both TTBR0 and TTBR1. */
#define TCR_TG_FLAGS    TCR_TG0_4K | TCR_TG1_4K

#ifdef CONFIG_SMP
#define TCR_SMP_FLAGS   TCR_SHARED
#else
#define TCR_SMP_FLAGS   0
#endif

/* PTWs cacheable, inner/outer WBWA */
#define TCR_CACHE_FLAGS TCR_IRGN_WBWA | TCR_ORGN_WBWA


#ifdef CONFIG_ARM64_64K_PAGES
/* The decompressor always creates 2M block mappings, regardless of
 * whether the kernel page size is 4K, 16K or 64K, so a 64K-pages build
 * still needs the 4K-granule/2M-block constants defined below.
 *  +--------+--------+--------+--------+--------+--------+--------+--------+
 *  |63    56|55    48|47    40|39    32|31    24|23    16|15     8|7      0|
 *  +--------+--------+--------+--------+--------+--------+--------+--------+
 *   |                 |         |         |         |
 *   |                 |         |         |         |
 *   |                 |         |         |         |
 *   |                 |         |         |         +-> [20:0]  in-block offset
 *   |                 |         |         +-----------> [29:21] block index, [21] BLOCK_SHIFT
 *   |                 |         +---------------------> [38:30] table index, [30] TABLE_SHIFT_2M
 *   |                 +-------------------------------> [47:39] pgdir index, [39] PGDIR_SHIFT_2M
 *   +-------------------------------------------------> [63] TTBR0/1
 */
#define VA_BITS_48      48
#define PAGE_SIZE_4K    (1 << 12)
#define BLOCK_SHIFT     21
#define BLOCK_SIZE      (1 << BLOCK_SHIFT)
#define TABLE_SHIFT_2M  30
#define PGDIR_SHIFT_2M  39
#define PTRS_PER_PGD_2M (1 << 9)
#define PTRS_PER_PMD_2M (1 << 9)
#define PTRS_PER_PTE_2M (1 << 9)
#else
/* With 4K pages the kernel's own section constants already match. */
#define BLOCK_SHIFT     SECTION_SHIFT
#define BLOCK_SIZE      SECTION_SIZE
#define TABLE_SHIFT     PUD_SHIFT
#endif


/*
 * Initial memory map attributes.
 */
#ifndef CONFIG_SMP
#define PTE_FLAGS       PTE_TYPE_PAGE | PTE_AF
#define PMD_FLAGS       PMD_TYPE_SECT | PMD_SECT_AF
#else
#define PTE_FLAGS       PTE_TYPE_PAGE | PTE_AF | PTE_SHARED
#define PMD_FLAGS       PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S
#endif

/* Block-descriptor attribute sets: normal RAM vs. device MMIO. */
#define MM_MMUFLAGS     PMD_ATTRINDX(MT_NORMAL) | PMD_FLAGS
#define DEV_MMUFLAGS    PMD_ATTRINDX(MT_DEVICE_nGnRE) | PMD_FLAGS

	/*
	 * Save x0-x13 (seven pairs, 112 bytes total) on the stack.
	 * Frame layout, low to high: x12/x13, x10/x11, x8/x9, x6/x7,
	 * x4/x5, x2/x3, x0/x1.  sp stays 16-byte aligned throughout and
	 * must be unwound with the matching pop_regs.
	 */
	.macro push_regs
        sub     sp, sp, #16
        stp     x0, x1, [sp], #-16
        stp     x2, x3, [sp], #-16
        stp     x4, x5, [sp], #-16
        stp     x6, x7, [sp], #-16
        stp     x8, x9, [sp], #-16
        stp     x10, x11, [sp], #-16
        stp     x12, x13, [sp]
	.endm

	/* Restore x0-x13 saved by push_regs (pops the full 112 bytes). */
	.macro pop_regs
        ldp     x12, x13, [sp], #16
        ldp     x10, x11, [sp], #16
        ldp     x8, x9, [sp], #16
        ldp     x6, x7, [sp], #16
        ldp     x4, x5, [sp], #16
        ldp     x2, x3, [sp], #16
        ldp     x0, x1, [sp], #16
	.endm

#ifdef CONFIG_RTOS_HAL_DEBUG_LL

	/*
	 * print_c - emit the character \val followed by '\n' to the UART
	 * register addressed by uart_addr.  Preserves x0-x13 via push_regs.
	 * NOTE(review): writes the UART register with a 64-bit store; most
	 * UART data registers take 32-bit accesses — confirm against the
	 * target SoC.
	 */
	.macro print_c, val
		push_regs
		ldr x5, uart_addr
		mov x6, \val
#ifdef CONFIG_CPU_BIG_ENDIAN
		rev x6, x6
#endif
		str x6, [x5]
		mov x6, #'\n'
#ifdef CONFIG_CPU_BIG_ENDIAN
		rev x6, x6
#endif
		str x6, [x5]
		pop_regs
	.endm

	/*
	 * print_n - print the number \val through the C helper print_num
	 * (x0 = value, x1/x2 = UART address).  Preserves x0-x13.
	 * NOTE(review): x2 is loaded from uart_addr as well — verify that
	 * print_num does not expect uart_type in its third argument.
	 */
	.macro print_n, val
		push_regs
		mov x0, \val
		ldr x1, uart_addr
		ldr x2, uart_addr
		bl print_num
		pop_regs
	.endm
#endif

	.type	wont_overwrite, #function
	.type	restart, #function
	.type	not_relocated, #function
#ifdef CONFIG_RTOS_HAL_SELFDECOMPRESS_ACPI
        __HEAD
_head:
        /*
         * DO NOT MODIFY. Image header expected by Linux boot-loaders.
         * This add instruction has no meaningful effect except that
         * its opcode forms the magic "MZ" signature required by UEFI.
         */
        add     x13, x18, #0x16
        b       start
        .quad   0                               // reserved
        .quad   0                               // reserved
        .quad   0                               // reserved
        .quad   0                               // reserved
        .quad   0                               // reserved
        .quad   0                               // reserved
        .byte   0x41                            // Magic number, "ARM\x64"
        .byte   0x52
        .byte   0x4d
        .byte   0x64
        .long   pe_header - _head               // Offset to the PE header.
        .align 3
pe_header:
	__EFI_PE_HEADER

ENTRY(start)
#else
start:
#endif
	.rept   8
	mov     x0, x0                          // 8 no-ops: landing pad at the entry point
	.endr
	mov	x10, x0                         // stash boot args x0-x3 (x0 = dtb pointer)
	mov	x11, x1
	mov	x12, x2
	mov	x13, x3
	bl	el2_setup                       // configure EL2 / drop to EL1; w20 = boot EL
	bl	save_boot_mode                  // persist w20 into boot_mode
	bl 	save_args                       // persist x10-x13 into boot_args/dtb_addr
	adr_l	sp, .L_user_stack_end, x0       // switch to the decompressor stack
	mov	x0, sp
        adr     x4, start
	lsr	x4, x4, #21                     // round the load address down ...
	lsl	x4, x4, #21                     // ... to a 2M boundary
#ifdef CONFIG_RTOS_HAL_2M_TEXT_OFFSET
	/*
	 * Add 2M offset to start address of second kernel.
	 * Avoid destroying dtb of some boards after getting
	 * rid of TEXT_OFFSET on kernel 5.8.
	 */
	add	x4, x4, #0x200000
#endif
	mov	x25, x4                         // x25 = final (decompressed) kernel address
restart:
	/*
	 * Load the link-time layout table LC0 and compute the run-time
	 * delta (x0) against the address we were linked at.
	 */
	adr	x0, LC0
	ldp	x1, x2, [x0]                    // x1 = linked LC0, x2 = BSS start
	ldp     x3, x6, [x0, #16]               // x3 = BSS end,  x6 = _edata
	ldp     x10, x11, [x0, #32]             // x10 = inflated-size location, x11 = GOT start
	ldp	x12, x13, [x0, #48]             // x12 = GOT end, x13 = linked stack top

	sub	x0, x0, x1                      // x0 = delta = run addr - link addr
	add	x6, x6, x0                      // runtime _edata
	add	x10, x10, x0
	add	sp, x13, x0                     // relocate the stack pointer

	/*
	 * Read the 32-bit little-endian inflated-image size stored at
	 * input_data_end - 4 byte by byte: it may be unaligned.
	 */
	mov	x9, #0
	ldrb    w9, [x10, #0]
	ldrb    w14, [x10, #1]
	orr     w9, w9, w14, lsl #8
	ldrb    w14, [x10, #2]
	ldrb    w10, [x10, #3]
	orr     w9, w9, w14, lsl #16
	orr     w9, w9, w10, lsl #24            // x9 = size of decompressed image
	add     x10, sp, #0x10000               // end of image incl. stack/malloc space
	mov	x5, #0                          // x5 = appended dtb size (none known yet)

/*
 *   x0  = delta
 *   x2  = BSS start
 *   x3  = BSS end
 *   x4  = final kernel address (possibly with LSB set)
 *   x5  = appended dtb size (still unknown)
 *   x6  = _edata
 *   x7  = architecture ID
 *   x8  = atags/device tree pointer
 *   x9  = size of decompressed image
 *   x10 = end of this image, including  bss/stack/malloc space if non XIP
 *   x11 = GOT start
 *   x12 = GOT end
 *   sp  = stack pointer
 *
 * if there are device trees (dtb) appended to zImage, advance x10 so that the
 * dtb data will get relocated along with the kernel if necessary.
 */


/*
 * Check to see if we will overwrite ourselves.
 *   x4  = final kernel address (possibly with LSB set)
 *   x9  = size of decompressed image
 *   x10 = end of this image, including  bss/stack/malloc space if non XIP
 * We basically want:
 *   x4 - 16k page directory >= x10 -> OK
 *   x4 + image length <= address of wont_overwrite -> OK
 * Note: the possible LSB in x4 is harmless here.
 */

        add     x10, x10, #0x10000              // include the 64K malloc arena
        cmp     x4, x10
        bhs     wont_overwrite
        add     x10, x4, x9
        adr     x9, wont_overwrite
        cmp     x10, x9
        bls     wont_overwrite
/*
 * Relocate ourselves past the end of the decompressed kernel.
 *   x6  = _edata
 *   x10 = end of the decompressed kernel
 * Because we always copy ahead, we need to do it from the end and go
 * backward in case the source and destination overlap.
 */
        /*
         * Bump to the next page with the size of
         * the relocation code added. This avoids overwriting
         * ourself when the offset is small.
         */
	mov	x13, #((reloc_code_end - restart + 0x1000) & ~0xFFF)
	add	x10, x10, x13
	bic	x10, x10, #0xFFF

//	mov	x26, x10
//	add	x10, x10, 0x10000


        /* Get start of code we want to copy and align it down. */
        adr     x5, restart
        bic     x5, x5, #0xFFF   //page align

	sub	x9, x6, x5    //size = _edata - restart
	add	x9, x9, #0xFFF
	bic	x9, x9, #0xFFF   //page align
	add	x6, x9, x5    // x6 = end of zImage source
	add	x9, x9, x10   // x9 = end of zImage destination
	add	x26, x9, 0x300000
	bic	x26, x26, #0x1ffff  // x26 = page-table root (becomes ttbr0), 128K aligned
#ifdef CONFIG_RTOS_HAL_SELFDECOMPRESS_MAP_PHYS_ADDR_ABOVE_4G
	adr	x27, tbl_addr
	str	x26, [x27]                      // remember the root for map_addr_above_4G
#endif
	bl	__create_page_tables            // builds the idmap and turns the MMU on
1:
	/*
	 * Copy 256 bytes per iteration, highest block first; both x6 and
	 * x9 are page aligned above, so the loop ends exactly at x6 == x5.
	 */
	sub	x6, x6, #256
	sub	x9, x9, #256
	ldp	x13, x14, [x6]
	stp	x13, x14, [x9]
        ldp     x13, x14, [x6, #1 * 16]
        stp     x13, x14, [x9, #1 * 16]
        ldp     x13, x14, [x6, #2 * 16]
        stp     x13, x14, [x9, #2 * 16]
        ldp     x13, x14, [x6, #3 * 16]
        stp     x13, x14, [x9, #3 * 16]
        ldp     x13, x14, [x6, #4 * 16]
        stp     x13, x14, [x9, #4 * 16]
        ldp     x13, x14, [x6, #5 * 16]
        stp     x13, x14, [x9, #5 * 16]
        ldp     x13, x14, [x6, #6 * 16]
        stp     x13, x14, [x9, #6 * 16]
        ldp     x13, x14, [x6, #7 * 16]
        stp     x13, x14, [x9, #7 * 16]
        ldp     x13, x14, [x6, #8 * 16]
        stp     x13, x14, [x9, #8 * 16]
        ldp     x13, x14, [x6, #9 * 16]
        stp     x13, x14, [x9, #9 * 16]
        ldp     x13, x14, [x6, #10 * 16]
        stp     x13, x14, [x9, #10 * 16]
        ldp     x13, x14, [x6, #11 * 16]
        stp     x13, x14, [x9, #11 * 16]
        ldp     x13, x14, [x6, #12 * 16]
        stp     x13, x14, [x9, #12 * 16]
        ldp     x13, x14, [x6, #13 * 16]
        stp     x13, x14, [x9, #13 * 16]
        ldp     x13, x14, [x6, #14 * 16]
        stp     x13, x14, [x9, #14 * 16]
        ldp     x13, x14, [x6, #15 * 16]
        stp     x13, x14, [x9, #15 * 16]
	cmp	x6, x5
	bne	1b
        /* Preserve offset to relocated code. */
        sub     x6, x9, x6                      // x6 = relocation offset
	mov	x0, x5
	mov	x1, #0x2000000
	push_regs
	bl	__dma_flush_area                // flush the copied code to PoC
	pop_regs
	adr	x0, restart
	add	x0, x0, x6                      // x0 = relocated restart
	adr	x20, boot_mode
	ldr	x20, [x20]
	cmp	w20, #BOOT_CPU_MODE_EL2
	b.ne	no_set_vector
	add	x22, x22, x6                    // relocate the hyp stub vectors as well
	hvc	#HVC_SET_VECTORS
no_set_vector:
	ldr	x1, [x0]
	ldr	x1, [x0]                        // NOTE(review): duplicated dummy load of the branch target — presumably a deliberate warm-up, confirm
	br	x0                              // re-enter the relocated copy at restart

wont_overwrite:
/*
 * If delta is zero, we are running at the address we were linked at.
 *   x0  = delta
 *   x2  = BSS start
 *   x3  = BSS end
 *   x4  = kernel execution address (possibly with LSB set)
 *   x5  = appended dtb size (0 if not present)
 *   x7  = architecture ID
 *   x8  = atags pointer
 *   x11 = GOT start
 *   x12 = GOT end
 *   sp  = stack pointer
 */
        orr     x1, x0, x5                      // delta == 0 and no appended dtb?
        cbz     x1, not_relocated

        add     x11, x11, x0                    // relocate the GOT bounds
        add     x12, x12, x0

        /*
         * If we're running fully PIC === CONFIG_ZBOOT_ROM = n,
         * we need to fix up pointers into the BSS region.
         * Note that the stack pointer has already been fixed up.
         */
        add     x2, x2, x0
        add     x3, x3, x0

        /*
         * Relocate all entries in the GOT table.
         * Bump bss entries to _edata + dtb size
         */

1:      ldr     x1, [x11, #0]           // relocate entries in the GOT
        add     x1, x1, x0              // This fixes up C references
        cmp     x1, x2                  // if the entry is in the bss section
        blo	2f
	cmp     x1, x3                   // relocate it to the tail
	bhs	2f
        add   x1, x1, x5              // of zImage-dtb
2:
        str     x1, [x11], #8           // next entry
        cmp     x11, x12
        blo     1b


        /* relocate the bss section from the tail of zImage to the tail of zImage-dtb */
        add     x2, x2, x5
        add     x3, x3, x5

not_relocated:
	mov     x0, #0
1:      stp     x0, x0, [x2], #16            // clear bss, 16 bytes per iteration
        cmp     x2, x3
        blo     1b

	/* Pull uart/sysctl/debug information out of the device tree. */
	ldr	x0, dtb_addr
	push_regs
	bl	find_info_from_dtb
	//bl	add_peripheral_page_tables
#ifdef CONFIG_RTOS_HAL_SELFDECOMPRESS_MAP_PHYS_ADDR_ABOVE_4G
	bl	map_uart_addr_above_4G
#endif
	pop_regs
	/* decompress_kernel(out = x4, free memory = [sp, sp + 64K)) */
	mov	x0, x4
	mov     x1, sp
	add     x2, sp, #0x10000
	bl	decompress_kernel

	/* Flush the decompressed image to PoC before caches go off. */
	isb
	dsb	sy
	mov	x0, x25
	add	x1, sp, #0x10000
	sub	x1, x1, x0                      // length = (sp + 64K) - x25
	bl      __flush_dcache_area

	/* Disable the MMU (bit 0) and D-cache (bit 2), drop the I-cache. */
	isb
	dsb	sy
	mrs	x0, sctlr_el1
	mov	x1, #0x005
	bic	x0, x0, x1
	isb
	msr	sctlr_el1, x0
	isb
	ic	iallu
	dsb	nsh
	isb
	.rept	8
	mov	x0, x0                          // no-op settle after translation changed
	.endr
	bl	restore_args                    // x0-x3 = original bootloader arguments
	adr	x20, boot_mode
	ldr	x20, [x20]
	cmp	w20, #BOOT_CPU_MODE_EL1
	b.ne	boot_from_el2
#ifdef CONFIG_RTOS_HAL_DEBUG_LL
	ldr x4, debug_ll                        // if early-print enabled, pass uart info on
	cmp x4, 0
	b.eq 10f
	ldr x1, uart_addr
	ldr x2, uart_type
10:
#endif
	br	x25                             // enter the decompressed kernel at EL1
	b	fail_boot
boot_from_el2:
	hvc	#HVC_BOOT_KERNEL                // hyp stub erets into the kernel at EL2
fail_boot:

/*
 * el2_setup - sanitize the exception level we were entered in.
 *
 * Out: w20 = BOOT_CPU_MODE_EL1 or BOOT_CPU_MODE_EL2 (EL we booted in)
 *      x22 = address of __hyp_stub_vectors (EL2 path only)
 * If entered in EL1, only fixes the endianness bits and returns.
 * If entered in EL2, configures HCR/timers/GICv3/ID regs/sctlr_el1,
 * installs the hyp stub vectors, and erets back to the caller in EL1h.
 * Clobbers: x0, x1, flags.
 */
RTOS_ENTRY_NOTAG(el2_setup)
        mrs     x0, CurrentEL
        cmp     x0, #CurrentEL_EL2
        b.ne    1f
        mrs     x0, sctlr_el2
CPU_BE( orr     x0, x0, #(1 << 25)      )       // Set the EE bit for EL2
CPU_LE( bic     x0, x0, #(1 << 25)      )       // Clear the EE bit for EL2
        msr     sctlr_el2, x0
        b       2f
1:      mrs     x0, sctlr_el1
CPU_BE( orr     x0, x0, #(3 << 24)      )       // Set the EE and E0E bits for EL1
CPU_LE( bic     x0, x0, #(3 << 24)      )       // Clear the EE and E0E bits for EL1
        msr     sctlr_el1, x0
        mov     w20, #BOOT_CPU_MODE_EL1         // This cpu booted in EL1
        isb
        ret

        /* Hyp configuration. */
2:      mov     x0, #(1 << 31)                  // HCR_EL2.RW: EL1 is AArch64
        msr     hcr_el2, x0

        /* Generic timers. */
        mrs     x0, cnthctl_el2
        orr     x0, x0, #3                      // Enable EL1 physical timers
        msr     cnthctl_el2, x0
        msr     cntvoff_el2, xzr                // Clear virtual offset

#ifdef CONFIG_ARM_GIC_V3
        /* GICv3 system register access */
        mrs     x0, id_aa64pfr0_el1
        ubfx    x0, x0, #24, #4                 // ID_AA64PFR0_EL1.GIC field
        cmp     x0, #1
        b.ne    3f

        mrs_s   x0, SYS_ICC_SRE_EL2
        orr     x0, x0, #ICC_SRE_EL2_SRE        // Set ICC_SRE_EL2.SRE==1
        orr     x0, x0, #ICC_SRE_EL2_ENABLE     // Set ICC_SRE_EL2.Enable==1
        msr_s   SYS_ICC_SRE_EL2, x0
        isb                                     // Make sure SRE is now set
        msr_s   SYS_ICH_HCR_EL2, xzr                // Reset ICH_HCR_EL2 to defaults

3:
#endif
        /* Populate ID registers. */
        mrs     x0, midr_el1
        mrs     x1, mpidr_el1
        msr     vpidr_el2, x0
        msr     vmpidr_el2, x1

        /* sctlr_el1 */
        mov     x0, #0x0800                     // Set/clear RES{1,0} bits
CPU_BE( movk    x0, #0x33d0, lsl #16    )       // Set EE and E0E on BE systems
CPU_LE( movk    x0, #0x30d0, lsl #16    )       // Clear EE and E0E on LE systems
        msr     sctlr_el1, x0

        /* Coprocessor traps. */
        mov     x0, #0x33ff
        msr     cptr_el2, x0                    // Disable copro. traps to EL2

#ifdef CONFIG_COMPAT
        msr     hstr_el2, xzr                   // Disable CP15 traps to EL2
#endif

        /* Stage-2 translation */
        msr     vttbr_el2, xzr

	/* Hypervisor stub; x22 is kept so restart can relocate the vectors. */
	adrp	x0, __hyp_stub_vectors
	add	x0, x0, #:lo12:__hyp_stub_vectors
	mov	x22,x0
	msr	vbar_el2, x0

        /* spsr */
        mov     x0, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
                      PSR_MODE_EL1h)
        msr     spsr_el2, x0
        msr     elr_el2, lr                     // eret lands back at our caller, in EL1
        mov     w20, #BOOT_CPU_MODE_EL2         // This CPU booted in EL2
        eret
ENDPROC(el2_setup)

/*
 * save_args - preserve the four bootloader arguments.
 *
 * In:  x10-x13 = original x0-x3 from the bootloader (x10 = dtb pointer)
 * Out: boot_args[0..3] and dtb_addr updated
 * Clobbers: x21
 */
RTOS_ENTRY_NOTAG(save_args)
	adr	x21, boot_args
	stp	x10, x11, [x21]
	stp	x12, x13, [x21, #16]
	adr	x21, dtb_addr
	str	x10, [x21]                      // x10 is the dtb pointer from boot x0
	ret
ENDPROC(save_args)

/*
 * restore_args - reload the saved bootloader arguments into x0-x3
 * just before branching to the decompressed kernel.
 * Clobbers: x21, x26.
 */
RTOS_ENTRY_NOTAG(restore_args)
	mov	x26, lr                         // NOTE(review): lr is not clobbered below; this copy looks redundant
	adr	x21, boot_args
	ldp	x0, x1, [x21]
	ldp     x2, x3, [x21, #16]
        ret	x26
ENDPROC(restore_args)

/*
 * save_boot_mode - record the EL we booted in (w20, set by el2_setup)
 * into the boot_mode variable, then invalidate the line so the value
 * is not shadowed by a stale cached copy once caches are enabled.
 * Clobbers: x21; x20 is zero-extended in place.
 */
RTOS_ENTRY_NOTAG(save_boot_mode)
	mov	x21, 0
	mov	w21, w20                        // zero-extend the 32-bit boot mode
	mov	x20, x21
	adr	x21, boot_mode
	str	x20, [x21]
	dmb	sy
	dc	ivac, x21			// Invalidate potentially stale cache line
	ret
ENDPROC(save_boot_mode)

#include "../../mm/cache.S"

/*
 * Macro to create a table entry to the next page.
 *
 *      tbl:    page table address
 *      virt:   virtual address
 *      shift:  #imm page table shift
 *      ptrs:   #imm pointers per table page
 *
 * Preserves:   virt
 * Corrupts:    tmp1, tmp2
 * Returns:     tbl -> next level table page address
 *
 * Unlike the kernel's create_table_entry, the next-level table is
 * placed at tbl + (index + 1) * 4K — one table page per used index —
 * and \tbl is rewritten to that next-level table address on exit.
 */
        .macro  create_table_entry, tbl, virt, shift, ptrs, tmp1, tmp2
        lsr     \tmp1, \virt, #\shift
        and     \tmp1, \tmp1, #\ptrs - 1        // table index
#ifdef CONFIG_ARM64_64K_PAGES
	mov	\tmp2, #PAGE_SIZE_4K            // tables are always 4K in the decompressor
#else
	mov	\tmp2, #PAGE_SIZE
#endif
	madd	\tmp2, \tmp1, \tmp2, \tmp2      // (index + 1) * page size
        add     \tmp2, \tbl, \tmp2
        orr     \tmp2, \tmp2, #PMD_TYPE_TABLE   // address of next table and entry type
        str     \tmp2, [\tbl, \tmp1, lsl #3]
       // add     \tbl, \tbl, #PAGE_SIZE          // next level table page
        bic	\tbl, \tmp2, #PMD_TYPE_TABLE    // strip type bits -> next table address
	.endm

/*
 * Macro to populate the PGD (and possibly PUD) for the corresponding
 * block entry in the next level (tbl) for the given virtual address.
 *
 * Preserves:   virt
 * Corrupts:    tmp1, tmp2
 * Returns:     tbl -> table page holding the block entries for virt
 *              (create_table_entry rewrites \tbl at each level)
 */
        .macro  create_pgd_entry, tbl, virt, tmp1, tmp2
#ifdef CONFIG_ARM64_64K_PAGES
        create_table_entry \tbl, \virt, PGDIR_SHIFT_2M, PTRS_PER_PGD_2M, \tmp1, \tmp2
#else
        create_table_entry \tbl, \virt, PGDIR_SHIFT, PTRS_PER_PGD, \tmp1, \tmp2
#endif
#if SWAPPER_PGTABLE_LEVELS == 3
#ifdef CONFIG_ARM64_64K_PAGES
        create_table_entry \tbl, \virt, TABLE_SHIFT_2M, PTRS_PER_PTE_2M, \tmp1, \tmp2
#else
        create_table_entry \tbl, \virt, TABLE_SHIFT, PTRS_PER_PTE, \tmp1, \tmp2
#endif
#endif
        .endm

/*
 * Macro to populate block entries in the page table for the start..end
 * virtual range (inclusive).
 *
 * Preserves:   tbl, flags
 * Corrupts:    phys, start, end, pstate
 */
        .macro  create_block_map, tbl, flags, phys, start, end
        lsr     \phys, \phys, #BLOCK_SHIFT
        lsr     \start, \start, #BLOCK_SHIFT
#ifdef CONFIG_ARM64_64K_PAGES
        and     \start, \start, #PTRS_PER_PTE_2M - 1       // table index
#else
        and     \start, \start, #PTRS_PER_PTE - 1       // table index
#endif
        orr     \phys, \flags, \phys, lsl #BLOCK_SHIFT  // table entry
	sub	\end, \end, #1                          // make the end inclusive
        lsr     \end, \end, #BLOCK_SHIFT
#ifdef CONFIG_ARM64_64K_PAGES
        and     \end, \end, #PTRS_PER_PTE_2M - 1           // table end index
#else
        and     \end, \end, #PTRS_PER_PTE - 1           // table end index
#endif
9999:   str     \phys, [\tbl, \start, lsl #3]           // store the entry
        add     \start, \start, #1                      // next entry
        add     \phys, \phys, #BLOCK_SIZE               // next block
        cmp     \start, \end
        b.ls    9999b                                   // loop while start <= end
        .endm

#ifdef CONFIG_RTOS_HAL_SELFDECOMPRESS_MAP_PHYS_ADDR_ABOVE_4G
/*
 * Macro to map a physical address that lies above 4G at a fixed,
 * non-identity virtual address (one 2M device block, table root from
 * tbl_addr).
 *      virt: virtual addr; on exit includes phys's in-block offset
 *      phys: physical addr
 * Corrupts:    virt, phys, tmp1, tmp2, tmp3, tmp4
 */
	.macro  map_addr_above_4G, virt, phys, tmp1, tmp2, tmp3, tmp4
	ldr	\tmp1, tbl_addr
	and	\tmp3, \phys, #BLOCK_SIZE - 1         // get In-block offset
	add	\virt, \virt, \tmp3                   // keep the offset in the returned VA
	create_pgd_entry \tmp1, \virt, \tmp3, \tmp4
	mov	\tmp2, #DEV_MMUFLAGS
	mov	\tmp3, \virt
	add	\tmp4, \virt, #0x1000
	create_block_map \tmp1, \tmp2, \phys, \tmp3, \tmp4     // do map
	.endm
#endif

/*
 * __create_page_tables - build the boot identity map rooted at x26 and
 * enable the MMU (tail path: __cpu_setup -> __enable_mmu).
 *
 * In:  x26 = page-table root (becomes TTBR0)
 *      x25 = final kernel address, x9 = end of the relocated image
 * Preserves x0-x13 via push_regs/pop_regs; returns through x27.
 */
RTOS_ENTRY_NOTAG(__create_page_tables)
        push_regs
	mov     x27, lr
        /*
         * Invalidate the idmap and swapper page tables to avoid potential
         * dirty cache lines being evicted.
         */
        mov     x0, x26
#ifdef CONFIG_RTOS_HAL_SELFDECOMPRESS_MAP_PHYS_ADDR_ABOVE_4G
        /*
         * 0x5000 is the size of the memory occupied by the page table.
         * The PGD uses 1*4 KB, and the PTE uses 4*4 KB to support a maximum of 4 GB
         * identity mapping memory access.
         * Currently, the physical address of the peripheral is greater than 4 GB.
         * To prevent conflicts with the existing virtual address in 4 GB, We used a
         * virtual address larger than 4G to map a physical address
         * larger than 4G (not identity mapping).
         * Therefore, the memory usage of a PTE is increased by 4 KB.
         */
        mov     x1, #0x6000
#else
        mov     x1, #0x5000
#endif
        bl      __dma_inv_area

        /*
         * Clear the idmap and swapper page tables.
         */
        mov     x0, x26
#ifdef CONFIG_RTOS_HAL_SELFDECOMPRESS_MAP_PHYS_ADDR_ABOVE_4G
        add     x6, x26, #0x6000
#else
        add     x6, x26, #0x5000
#endif
1:      stp     xzr, xzr, [x0], #16
        stp     xzr, xzr, [x0], #16
        stp     xzr, xzr, [x0], #16
        stp     xzr, xzr, [x0], #16
        cmp     x0, x6
        b.lo    1b

        /*
         * Create the identity mapping for all 4G addr.
         * Four 1G windows mapped with device attributes first; the RAM
         * ranges used are overwritten with normal attributes below.
         */
	mov	x4, #0x40000000                 // 1G per window
	mov	x7, #DEV_MMUFLAGS
        mov     x0, x26
        mov     x3, #0                          // window [0G, 1G)
        create_pgd_entry x0, x3, x5, x6
        mov     x5, x3
        add     x6, x5, x4
        create_block_map x0, x7, x3, x5, x6

	mov     x7, #DEV_MMUFLAGS
	mov	x0, x26
	mov	x3, #0x40000000                 // window [1G, 2G)
	create_pgd_entry x0, x3, x5, x6
	mov	x5, x3
	add	x6, x5, x4
	create_block_map x0, x7, x3, x5, x6

        mov     x7, #DEV_MMUFLAGS
        mov     x0, x26
        mov     x3, #0x80000000                 // window [2G, 3G)
        create_pgd_entry x0, x3, x5, x6
        mov     x5, x3
        add     x6, x5, x4
        create_block_map x0, x7, x3, x5, x6

        mov     x7, #DEV_MMUFLAGS
        mov     x0, x26
        mov     x3, #0xc0000000                 // window [3G, 4G)
        create_pgd_entry x0, x3, x5, x6
        mov     x5, x3
        add     x6, x5, x4
        create_block_map x0, x7, x3, x5, x6

        /*
         * Create the identity mapping for all ddr we used.
         */
        mov     x7, #MM_MMUFLAGS
        mov     x0, x26                         // idmap_pg_dir
        mov     x3, x25                // __pa(KERNEL_START)
	create_pgd_entry x0, x3, x5, x6
        mov     x5, x3                          // __pa(KERNEL_START before move)
        add     x6, x9, #0x100000                  // __pa(KERNEL_END after move + sp + malloc)
        create_block_map x0, x7, x3, x5, x6

	mov     x7, #MM_MMUFLAGS
        mov     x0, x26
        ldr     x3, dtb_addr                    // map the dtb as normal memory too
	create_pgd_entry x0, x3, x5, x6
        mov     x5, x3
        add     x6, x5, #0x10000
        create_block_map x0, x7, x3, x5, x6

	dmb     sy                              // table writes visible before the MMU goes on
	bl	__cpu_setup                     // sets MAIR/TCR/SCTLR and enables the MMU
	pop_regs
        ret	x27
ENDPROC(__create_page_tables)

/*
 * __cpu_setup - prepare EL1 system registers for enabling the MMU.
 *
 * In:  x26 = page-table root (loaded into TTBR0 by __enable_mmu)
 * Pushes lr; __enable_mmu pops it and returns to our caller.
 * Clobbers: x0, x5, x6, x9, x10, flags.
 */
RTOS_ENTRY_NOTAG(__cpu_setup)
        tlbi    vmalle1is                       // invalidate I + D TLBs
        dsb     ish

	str	lr, [sp, #-0x10]!               // popped in __enable_mmu



        mov     x0, #3 << 20
        msr     cpacr_el1, x0                   // Enable FP/ASIMD
        msr     mdscr_el1, xzr                  // Reset mdscr_el1
        /*
         * Memory region attributes for LPAE:
         *
         *   n = AttrIndx[2:0]
         *                      n       MAIR
         *   DEVICE_nGnRnE      000     00000000
         *   DEVICE_nGnRE       001     00000100
         *   DEVICE_GRE         010     00001100
         *   NORMAL_NC          011     01000100
         *   NORMAL             100     11111111
         *   NORMAL_WT          101     10111011
         */
        adr     x5, MAIR
	ldr	x5, [x5]
        msr     mair_el1, x5
        /*
         * Prepare SCTLR
         */
        adr     x5, crval
        ldp     w5, w6, [x5]                    // w5 = bits to clear, w6 = bits to set
        mrs     x0, sctlr_el1
        bic     x0, x0, x5                      // clear bits
        orr     x0, x0, x6                      // set bits
        /*
         * Set/prepare TCR and TTBR. We use 512GB (39-bit) address range for
         * both user and kernel.
         */
        adr	x10, TCR
	ldr	x10, [x10]

	adr	x9, idmap_t0sz
	ldr	x9, [x9]


	bfi     x10, x9, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH      // plug T0SZ into TCR
	//tcr_set_idmap_t0sz      x10, x9


        /*
         * Read the PARange bits from ID_AA64MMFR0_EL1 and set the IPS bits in
         * TCR_EL1.
         */
        mrs     x9, ID_AA64MMFR0_EL1
        bfi     x10, x9, #32, #3
	orr     x10, x10, #1<<23		//disable ttbr1_el1 walks (TCR_EL1.EPD1)
        msr     tcr_el1, x10

        b	__enable_mmu                                     // tail-call; rets to our caller via saved lr
ENDPROC(__cpu_setup)

/*
 * __enable_mmu - program TTBR0 from x26, write the SCTLR value prepared
 * in x0 by __cpu_setup (turning the MMU on), and return through the lr
 * that __cpu_setup pushed on the stack.
 */
RTOS_ENTRY_NOTAG(__enable_mmu)
        msr     ttbr0_el1, x26                  // load TTBR0
        isb
        msr     sctlr_el1, x0                   // MMU/caches enabled from here on
        isb
        /*
         * Invalidate the local I-cache so that any instructions fetched
         * speculatively from the PoC are discarded, since they may have
         * been dynamically patched at the PoU.
         */


        ic      iallu
        dsb     nsh
        isb
	.rept	8
        mov     x0, x0                          // no-op settle after translation changed
	.endr
	ldr     lr, [sp]
	ldr	w0, [lr]                        // NOTE(review): dummy read of the return target through the new mapping — presumably deliberate, confirm

	ldr     lr, [sp], #0x10                 // pop the lr saved by __cpu_setup
	ret
ENDPROC(__enable_mmu)

#ifdef CONFIG_RTOS_HAL_SELFDECOMPRESS_MAP_PHYS_ADDR_ABOVE_4G
/*
 * map_uart_addr_above_4G - if the UART physical address lies above 4G,
 * map it at UART_VIRT_ADDR and rewrite uart_addr to hold the virtual
 * address (including the in-block offset) instead.
 * Clobbers: x0-x6, flags.
 * NOTE(review): relies on tbl_addr having been stored on the restart
 * (relocation) path — confirm the wont_overwrite path cannot reach
 * here with tbl_addr still zero.
 */
RTOS_ENTRY_NOTAG(map_uart_addr_above_4G)
	ldr	x1, uart_addr
	lsr	x3, x1, #32
	cmp	x3, #0                          // already below 4G? nothing to do
	b.eq	1f
	ldr	x0, =UART_VIRT_ADDR        // use this virt addr to map uart phy addr
	adr	x2, uart_addr
	map_addr_above_4G x0, x1, x3, x4, x5, x6
	str	x0, [x2]                // use virt addr as the value of the variable
1:
	ret
ENDPROC(map_uart_addr_above_4G)

/*
 * map_hwdog_addr_above_4G - same scheme for the hardware watchdog.
 * In:  x0 = watchdog physical address
 * Out: x0 = address to use (virtual mapping if the input was above 4G)
 * Clobbers: x1, x3-x6, flags.
 */
RTOS_ENTRY_NOTAG(map_hwdog_addr_above_4G)
	lsr	x3, x0, #32
	cmp	x3, #0
	b.eq	1f
	ldr	x1, =HWDOG_VIRT_ADDR        // use this virt addr to map hwdog phy addr
	map_addr_above_4G x1, x0, x3, x4, x5, x6
	mov	x0, x1
1:
	ret
ENDPROC(map_hwdog_addr_above_4G)
#endif

/*
 * add_peripheral_page_tables - map the UART and system-controller
 * (djtag, for LLC maintenance) MMIO regions as device memory in the
 * identity map rooted at x26, then invalidate the TLBs.
 *
 * In:  x26 = page-table root; uart_addr/sysctl_addr/sysctl_addr_b
 *      filled in by find_info_from_dtb (sysctl_addr_b may be 0).
 * Clobbers: x0, x3, x5, x6, x7, flags.
 *
 * Fixes vs. the previous version: the page-table stores are now
 * ordered before the TLBI (dsb ishst), and the TLBI is completed and
 * synchronized (dsb ish; isb) before returning — without these the
 * new/invalidated translations are not guaranteed visible to callers.
 */
RTOS_ENTRY_NOTAG(add_peripheral_page_tables)
        //map uart (4K, device attributes)
        ldr     x3, uart_addr
        mov     x0, x26
        mov     x7, #DEV_MMUFLAGS
        create_pgd_entry x0, x3, x5, x6
        mov     x5, x3                          // region start
        add     x6, x5, #0x1000                 // region end (4K)
        create_block_map x0, x7, x3, x5, x6

        //map djtag for llc maintain (64K, device attributes)
        ldr     x3, sysctl_addr
        mov     x0, x26
        mov     x7, #DEV_MMUFLAGS
        create_pgd_entry x0, x3, x5, x6
        mov     x5, x3
        add     x6, x5, #0x10000                // region end (64K)
        create_block_map x0, x7, x3, x5, x6

	ldr	x3, sysctl_addr_b
	cmp	x3, #0                          // optional second sysctl block
	b.eq	11111f
	mov	x0, x26
	mov	x7, #DEV_MMUFLAGS
	create_pgd_entry x0, x3, x5, x6
	mov     x5, x3
	add     x6, x5, #0x10000
	create_block_map x0, x7, x3, x5, x6

11111:	dsb	ishst                           // table updates visible to the walker
	tlbi    vmalle1is
	dsb	ish                             // complete the TLB invalidation
	isb                                     // synchronize the new translations
	ret
ENDPROC(add_peripheral_page_tables)

	/* Runtime-discovered platform data, filled in from the dtb. */
        .align  3
        .global uart_addr
uart_addr:
        .quad	0x0                             // UART base (rewritten to a VA if above 4G)
#ifdef CONFIG_RTOS_HAL_SELFDECOMPRESS_MAP_PHYS_ADDR_ABOVE_4G
tbl_addr:
        .quad	0x0                             // page-table root used by map_addr_above_4G
#endif

#ifdef CONFIG_RTOS_HAL_DEBUG_LL
        .global uart_type
uart_type:
        .quad	0x0                             // UART flavour for the early-print path

        .global debug_ll
debug_ll:
        .quad	0x0                             // non-zero: hand uart info to the kernel
#endif

        .global sysctl_addr
sysctl_addr:
        .quad   0x0                             // system controller (djtag) base

	.global sysctl_addr_b
sysctl_addr_b:
	.quad	0x0                             // optional second system controller base

        .global dtb_addr
dtb_addr:
        .quad   0                               // device tree blob address (from boot x0)

	.global llc_type
llc_type:
	.quad   0                               // last-level-cache type from the dtb

	/* EL the boot CPU entered in (BOOT_CPU_MODE_EL1/EL2). */
	.type   boot_mode, #object
boot_mode:
	.quad	0

	/* Saved bootloader x0-x3 (fifth slot unused). */
	.type   boot_args, #object
boot_args:
	.quad	0
	.quad	0
	.quad	0
	.quad	0
	.quad	0

	/* Link-time layout table consumed at the restart label. */
	.type   LC0, #object
LC0:	.quad   LC0                     // x1
	.quad   __bss_start             // x2
	.quad   _end                    // x3
	.quad   _edata                  // x6
	.quad   input_data_end - 4      // x10 (inflated size location)
	.quad   _got_start              // x11
	.quad   _got_end                // x12
	.quad   .L_user_stack_end       // sp
	.size   LC0, . - LC0

/* T0SZ value merged into TCR_EL1 by __cpu_setup. */
idmap_t0sz:
#ifdef CONFIG_ARM64_64K_PAGES
	.quad  TCR_T0SZ(VA_BITS_48)
#else
	.quad  TCR_T0SZ(VA_BITS)
#endif

/* Base TCR_EL1 value (granule/cacheability/shareability/ASID/TBI). */
TCR:
#ifdef CONFIG_ARM64_64K_PAGES
	.quad	TCR_TxSZ(VA_BITS_48) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | TCR_TG_FLAGS | TCR_ASID16 | TCR_TBI0
#else
	.quad	TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | TCR_TG_FLAGS | TCR_ASID16 | TCR_TBI0
#endif
/* MAIR_EL1 value matching the attribute-index table in __cpu_setup. */
MAIR:
	.quad	MAIR(0x00, MT_DEVICE_nGnRnE) | \
                     MAIR(0x04, MT_DEVICE_nGnRE) | \
                     MAIR(0x0c, MT_DEVICE_GRE) | \
                     MAIR(0x44, MT_NORMAL_NC) | \
                     MAIR(0xff, MT_NORMAL) | \
                     MAIR(0xbb, MT_NORMAL_WT)

	/* SCTLR_EL1 clear/set masks consumed by __cpu_setup. */
        .type   crval, #object
crval:
        .word   0xfcffffff                      // clear
        .word   0x34d5d91d                      // set

reloc_code_end:                                 // end of the region copied by restart

	/* 4K decompressor stack; sp starts at .L_user_stack_end. */
        .section ".stack", "aw", %nobits
        .align  12
.L_user_stack:  .space  4096
.L_user_stack_end:
