/*
 * Copyright (c) 2024 Black Sesame Technologies
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

    .syntax unified
    .arch armv7-r

/* ARM processor-mode values for the CPSR M[4:0] field (ARMv7-R). */
.EQU  CPSR_MODE_USR,        0x10    // User
.EQU  CPSR_MODE_FIQ,        0x11    // FIQ (fast interrupt)
.EQU  CPSR_MODE_IRQ,        0x12    // IRQ
.EQU  CPSR_MODE_SVC,        0x13    // Supervisor
.EQU  CPSR_MODE_ABT,        0x17    // Abort
.EQU  CPSR_MODE_UND,        0x1B    // Undefined
.EQU  CPSR_MODE_SYS,        0x1F    // System

    // C-level exception entry points, defined elsewhere in the project.
    .extern fiq_enter
    .extern irq_enter
    .extern dabort_main
    .extern pabort_main
    .extern undef_main
    .extern swi_main

    .section  .text
    .align 2
    .globl reset                // NOTE(review): 'reset' is exported but not defined in this file — confirm it is defined elsewhere or drop the export
    .globl Reset_Handler
//-------------------------------------------------------------------------------
// Exception dispatch stubs, laid out in ARM vector-table order:
// reset, undef, SWI, prefetch abort, data abort, (reserved), IRQ, FIQ.
// NOTE(review): confirm the linker script places Reset_Handler at the vector
// base address, otherwise these one-instruction slots are never fetched as vectors.
Reset_Handler:
                B       __test_main                       // reset: jump to startup code
undef:           
                LDR     pc, =u_exception                  // undefined instruction
swi:                
                LDR     pc,=swi_handler                   // SWI / SVC
pabort:
                LDR     pc, =p_exception                  // prefetch abort
dabort:
                LDR     pc, =d_exception                  // data abort
                LDR     pc, =d_exception                  // (reserved slot; reuses the data-abort path)
irq:
                LDR     pc, =irq_handler                  // IRQ
fiq:
//              LDR     pc, =fiq_handler                  // FIQ: branch intentionally commented out —
                                                          // the FIQ vector falls straight through into
                                                          // fiq_handler below (saves one branch)

// FIQ handler: save the non-banked registers, call the C dispatcher, return.
fiq_handler: 
    SUB   lr,lr,#4                      // FIQ return address is LR-4
    STMFD     SP!, {R0-R7,  LR}         // only R0-R7 need saving: R8-R12 are banked in FIQ mode
    LDR  lr,=fiq_ret                    // make the C dispatcher return to fiq_ret
    LDR  pc,=fiq_enter                  // jump to the C FIQ dispatcher
fiq_ret:    
    LDMIA     SP!, {R0-R7, PC}^         // ^ : exception return, restores CPSR from SPSR_fiq


// IRQ handler: save caller context, call the C dispatcher, restore and return.
irq_handler: 
    SUB   lr,lr,#4                      // IRQ return address is LR-4
    STMFD     SP!, {R0-R12,  LR}        // save working registers + adjusted return address
    LDR  lr,=irq_ret                    // make the C dispatcher return to irq_ret
    LDR  pc,=irq_enter                  // jump to the C IRQ dispatcher
irq_ret:    
    LDMIA     SP!, {R0-R12, PC}^        // ^ : exception return, restores CPSR from SPSR_irq

// Undefined-instruction handler.
// NOTE(review): LR_und points at the instruction AFTER the undefined one;
// SUB #4 makes the LDMIA return re-execute the faulting instruction, which
// will loop forever unless undef_main patches the saved LR on the stack —
// confirm this is the intended behavior.
u_exception:
    SUB   lr,lr,#4
    STMFD     SP!, {R0-R12,  LR}        // save context; LR on stack = return address
    LDR  lr,=undef_ret                  // make undef_main return to undef_ret
    LDR  pc,=undef_main
undef_ret:    
    LDMIA     SP!, {R0-R12, PC}^        // restore and return (CPSR <- SPSR_und)

// Data-abort handler. LR_abt points two instructions past the aborting one;
// SUB #8 is the standard data-abort offset to retry the aborted access.
d_exception:
    SUB   lr,lr,#8
    STMFD     SP!, {R0-R12,  LR}        // save context; LR on stack = retry address
    MOV R0, LR                          // arg0 to dabort_main: faulting-instruction address
    MOV R1, SP                          // arg1 to dabort_main: pointer to the saved register frame
    LDR  lr,=dept_ret                   // make dabort_main return to dept_ret
    LDR  pc,=dabort_main
dept_ret:    
    LDMIA     SP!, {R0-R12, PC}^        // restore and return (CPSR <- SPSR_abt)

// Prefetch-abort handler. SUB #4 is the standard prefetch-abort offset:
// the return retries the instruction whose fetch aborted.
p_exception:
    SUB   lr,lr,#4
    STMFD     SP!, {R0-R12,  LR}        // save context; LR on stack = retry address
    LDR  lr,=pept_ret                   // make pabort_main return to pept_ret
    LDR  pc,=pabort_main
pept_ret:    
    LDMIA     SP!, {R0-R12, PC}^        // restore and return (CPSR <- SPSR_abt)


// SWI/SVC handler. LR_svc already points at the instruction after the SVC,
// so no LR adjustment is needed here.
swi_handler:    
    STMFD     SP!, {R0-R12,  LR}        // save caller context + return address
    LDR  lr,=swi_ret                    // make swi_main return to swi_ret
    LDR  pc,=swi_main
swi_ret:    
    LDMIA     SP!, {R0-R12, PC}^        // restore and return (CPSR <- SPSR_svc)

//-------------------------------------------------------------------------------

                .globl __test_main
// Startup entry (branched to from Reset_Handler).
// Sequence: board signature writes -> disable MPU/caches -> disable branch
// prediction -> program per-mode stack pointers -> invalidate caches.
// (MPU programming and cache enable follow below.)
__test_main:
        // NOTE(review): raw writes to 0x217A0224 / 0x217A0000 look like
        // board-specific boot-status/control registers — confirm their
        // meaning against the SoC reference manual.
        ldr r0, =0x217A0224
        ldr r1, =0xABCD1234
        str r1, [r0]
        ldr r0, =0x217a0000
        ldr r1, =0x40FFFF
        str r1, [r0]
#if 1
//----------------------------------------------------------------
// Disable MPU and caches
//----------------------------------------------------------------

// Disable MPU and cache in case they were left enabled from an earlier run.
// This does not need to be done from a cold reset.

        MRC     p15, 0, r0, c1, c0, 0       // Read System Control Register (SCTLR)
        BIC     r0, r0, #0x05               // Disable MPU (M bit) and data cache (C bit)
        BIC     r0, r0, #0x1000             // Disable instruction cache (I bit)
        DSB                                 // Ensure all previous loads/stores have completed
        MCR     p15, 0, r0, c1, c0, 0       // Write System Control Register
        ISB                                 // Ensure subsequent insts execute wrt new MPU settings

//----------------------------------------------------------------
// Disable Branch prediction
//----------------------------------------------------------------

// In the Cortex-R5, the Z-bit of the SCTLR does not control program flow prediction.
// Control bits in the ACTLR control the program flow and prefetch features instead.
// These are enabled by default, but are shown here for completeness.

        MRC     p15, 0, r0, c1, c0, 1       // Read ACTLR
        ORR     r0, r0, #(0x1 << 17)        // Set RSDIS (bit 17) to disable the return stack
        ORR     r0, r0, #(0x1 << 16)        // BP[16:15] = 0b10:
        BIC     r0, r0, #(0x1 << 15)        //   branch always not taken, history table updates disabled
        MCR     p15, 0, r0, c1, c0, 1       // Write ACTLR
        ISB


@ Set up a private stack for every ARM processor mode.
@ Each mode's SP = <stack symbol from the linker script> - cpu_id * stack_size.
    MRC     p15, 0, r0, c0, c0, 5       @ Read CPU ID register (MPIDR)
    ANDS    r0, r0, #0x03               @ r0 = CPU ID field (0 or 1)

    MOV     r1, #CPSR_MODE_FIQ                    @ Build FIQ mode CPSR
    MSR     CPSR_c, r1                            @ Enter FIQ mode
    LDR     r1, =__fiq_stack                      @ FIQ stacks for CPU 0,1
    SUB     r1, r1, r0, LSL #10                   @ 1024 bytes of FIQ stack per CPU (0,1) - see *.ld
    MOV     sp, r1

    MOV     r1, #CPSR_MODE_IRQ                    @ Build IRQ mode CPSR
    MSR     CPSR_c, r1                            @ Enter IRQ mode
    LDR     r1, =__irq_stack                      @ IRQ stacks for CPU 0,1
    SUB     r1, r1, r0, LSL #10                   @ 1024 bytes of IRQ stack per CPU (0,1) - see *.ld
    MOV     sp, r1

    MOV     r2, #CPSR_MODE_SVC                    @ Build SVC mode CPSR
    MSR     CPSR_c, r2                            @ Enter SVC mode
    LDR     r1, =__supervisor_stack               @ Supervisor stacks for all CPUs
    SUB     r1, r1, r0, LSL #10                   @ 1024 bytes of SVC stack per CPU - see *.ld
    MOV     sp, r1

    MOV     r2, #CPSR_MODE_ABT                    @ Build ABT mode CPSR
    MSR     CPSR_c, r2                            @ Enter ABT mode
    LDR     r1, =__abort_stack                    @ Abort-mode stacks for all CPUs
    SUB     r1, r1, r0, LSL #10                   @ 1024 bytes of ABT stack per CPU - see *.ld
    MOV     sp, r1

    MOV     r2, #CPSR_MODE_UND                    @ Build UND mode CPSR
    MSR     CPSR_c, r2                            @ Enter UND mode
    LDR     r1, =__undef_stack                    @ Undef-mode stacks for all CPUs
    SUB     r1, r1, r0, LSL #10                   @ 1024 bytes of UND stack per CPU - see *.ld
    MOV     sp, r1

    MOV     r2, #CPSR_MODE_SYS                    @ Build SYS mode CPSR
    MSR     CPSR_c, r2                            @ Enter SYS mode (execution continues in SYS)
    LDR     r1, =___stack                         @ Application stacks for all CPUs
    SUB     r1, r1, r0, LSL #12                   @ 0x1000 bytes of app stack per CPU - see *.ld
    MOV     sp, r1

//----------------------------------------------------------------
// Cache invalidation
//----------------------------------------------------------------

        DSB                 // Complete all outstanding explicit memory operations
        MOV     r0, #0
        MCR     p15, 0, r0, c7, c5, 0       // ICIALLU: invalidate entire instruction cache
        MCR     p15, 0, r0, c15, c5, 0      // Invalidate entire data cache (Cortex-R implementation-specific op)
//----------------------------------------------------------------
// MPU Configuration
//----------------------------------------------------------------

// Notes:
// * Regions apply to both instruction and data accesses.
// * Each region base address must be a multiple of its size.
// * Any address range not covered by an enabled region will abort.
// * Where regions overlap, the higher-numbered region takes priority.
// * The region at 0x0 over the vector table is needed to support semihosting.
//
// Register encodings (ARMv7-R PMSA):
//   DRSR:  bits[5:1] = N -> region size 2^(N+1) bytes; bit[0] = enable
//   DRACR: AP = bits[10:8], TEX = bits[5:3], S = bit2, C = bit1, B = bit0

// Region 0: base 0x00000000, DRSR 0x39 = 512MB -> 0x00000000~0x20000000.
// Normal non-cacheable, AP=3 (full access).
// NOTE(review): the original header claimed 0~0x10000000 (256MB), but 0x39
// encodes 512MB; the upper half is overridden by regions 1 and 2 anyway.
        MOV    r0, #0                     //region number 0
        MOV    r3, #0                     //base address 0x00000000
//      MOV    r1,#0x310                  //alternative: AP=3 RW, TEX=2: non-shareable Device
        MOV    r1,#0x308                  //AP=3 RW, TEX=1, C=0, B=0: Normal, non-cacheable
//      MOV    r1,#0x608                  //alternative: AP=6 read-only, Normal non-cacheable
        MOV    r2,#0x39                   //size 512MB + enable
        MCR    p15,0,r0,c6,c2,0           // RGNR: select region number
        MCR    p15,0,r3,c6,c1,0           // DRBAR: region base address
        MCR    p15,0,r1,c6,c1,4           // DRACR: region access control
        MCR    p15,0,r2,c6,c1,2           // DRSR: region size and enable

// Region 1: base 0x10000000, 0x35 = 128MB -> 0x10000000~0x18000000.
// Non-shareable Device, AP=3 (full access).
        MOV    r0, #1                     //region number 1
        MOV    r3, #0x10000000            //base address
        MOV    r1,#0x310                  //AP=3 RW, TEX=2, C=0, B=0: non-shareable Device
        MOV    r2,#0x35                   //size 128MB + enable
        MCR    p15,0,r0,c6,c2,0           // RGNR: select region number
        MCR    p15,0,r3,c6,c1,0           // DRBAR: region base address
        MCR    p15,0,r1,c6,c1,4           // DRACR: region access control
        MCR    p15,0,r2,c6,c1,2           // DRSR: region size and enable

// Region 2: base 0x18000000, 0x35 = 128MB -> 0x18000000~0x20000000 (ROM/SRAM).
// Normal non-cacheable, AP=3 (full access).
        MOV    r0, #2                     //region number 2
        MOV    r3, #0x18000000            //base address
        MOV    r1,#0x308                  //AP=3 RW, TEX=1, C=0, B=0: Normal, non-cacheable
        MOV    r2,#0x35                   //size 128MB + enable
        MCR    p15,0,r0,c6,c2,0           // RGNR: select region number
        MCR    p15,0,r3,c6,c1,0           // DRBAR: region base address
        MCR    p15,0,r1,c6,c1,4           // DRACR: region access control
        MCR    p15,0,r2,c6,c1,2           // DRSR: region size and enable


// Region 3: base 0x20000000, 0x39 = 512MB -> 0x20000000~0x40000000.
// Strongly-ordered (forced — original note cites an ISR #1023 workaround).
// NOTE(review): the original header said "1G, 0x20000000~0x30000000"; 0x39
// encodes 512MB. The top half is overridden by region 4.
        MOV    r0, #3                   //region number 3
        MOV    r3, #0x20000000          //base address
        MOV    r1,#0x300                //AP=3 RW, TEX=0, C=0, B=0: Strongly-ordered
        MOV    r2,#0x39                   //size 512MB + enable
        MCR    p15,0,r0,c6,c2,0            // RGNR: select region number
        MCR    p15,0,r3,c6,c1,0           // DRBAR: region base address
        MCR    p15,0,r1,c6,c1,4           // DRACR: region access control
        MCR    p15,0,r2,c6,c1,2           // DRSR: region size and enable
    
// Region 4: base 0x30000000, 0x37 = 256MB -> 0x30000000~0x40000000.
// Shareable Device, AP=3. NOTE(review): the original header said 512MB.
        MOV    r0, #4                     //region number 4
        MOV    r3, #0x30000000              //base address
        MOV    r1,#0x301                //AP=3 RW, TEX=0, C=0, B=1: shareable Device
        MOV    r2,#0x37                   //size 256MB + enable
        MCR    p15,0,r0,c6,c2,0            // RGNR: select region number
        MCR    p15,0,r3,c6,c1,0           // DRBAR: region base address
        MCR    p15,0,r1,c6,c1,4           // DRACR: region access control
        MCR    p15,0,r2,c6,c1,2           // DRSR: region size and enable
        
// Region 5: base 0x40000000, 0x3B = 1GB -> 0x40000000~0x80000000.
// Shareable Device, AP=3. NOTE(review): the original header said 512MB
// ending at 0x50000000; 0x3B encodes 1GB.
        MOV    r0, #5                     //region number 5
        MOV    r3, #0x40000000              //base address
        MOV    r1,#0x301                //AP=3 RW, TEX=0, C=0, B=1: shareable Device
        MOV    r2,#0x3B                   //size 1GB + enable
        MCR    p15,0,r0,c6,c2,0            // RGNR: select region number
        MCR    p15,0,r3,c6,c1,0           // DRBAR: region base address
        MCR    p15,0,r1,c6,c1,4           // DRACR: region access control
        MCR    p15,0,r2,c6,c1,2           // DRSR: region size and enable

// Region 6: base 0x80000000, 0x3b = 1GB -> 0x80000000~0xC0000000 (DDR, low half).
// Normal non-cacheable, AP=3. NOTE(review): the original header said "2G";
// 0x3b encodes 1GB — the upper DDR half is covered by region 7.
        MOV    r0, #6                    //region number 6
        MOV    r3, #0x80000000              //base address
        MOV    r1,#0x308                //AP=3 RW, TEX=1, C=0, B=0: Normal, non-cacheable
        MOV    r2,#0x3b                  //size 1GB + enable
        MCR    p15,0,r0,c6,c2,0            // RGNR: select region number
        MCR    p15,0,r3,c6,c1,0           // DRBAR: region base address
        MCR    p15,0,r1,c6,c1,4           // DRACR: region access control
        MCR    p15,0,r2,c6,c1,2           // DRSR: region size and enable

// Region 7: base 0xC0000000, 0x3b = 1GB -> 0xC0000000~0x100000000 (DDR, high half).
// Shareable Device, AP=3.
        MOV    r0, #7                    //region number 7
        MOV    r3, #0xc0000000              //base address
        MOV    r1,#0x301                //AP=3 RW, TEX=0, C=0, B=1: shareable Device
        MOV    r2,#0x3b                  //size 1GB + enable
        MCR    p15,0,r0,c6,c2,0            // RGNR: select region number
        MCR    p15,0,r3,c6,c1,0           // DRBAR: region base address
        MCR    p15,0,r1,c6,c1,4           // DRACR: region access control
        MCR    p15,0,r2,c6,c1,2           // DRSR: region size and enable

#if 1
// Region 8: base 0x04800000, 0x23 = 256KB -> 0x04800000~0x04840000 (SW CORE0).
// Normal write-back/write-allocate cacheable (TEX=1, C=1, B=1), AP=3.
// Being the highest-numbered region, it overrides region 0's non-cacheable
// attribute for this window.
        MOV    r0, #8                    //region number 8
        MOV    r3, #0x4800000            //base address
        MOV    r1,#0x30b                 //AP=3 RW, TEX=1, C=1, B=1: Normal, write-back write-allocate
//        MOV    r2,#0x25                  //alternative: size 512KB + enable
        MOV    r2,#0x23                  //size 256KB + enable
        MCR    p15,0,r0,c6,c2,0          // RGNR: select region number
        MCR    p15,0,r3,c6,c1,0           // DRBAR: region base address
        MCR    p15,0,r1,c6,c1,4           // DRACR: region access control
        MCR    p15,0,r2,c6,c1,2           // DRSR: region size and enable
#endif

//----------------------------------------------------------------
// Enable Branch prediction
//----------------------------------------------------------------

// In the Cortex-R5, the Z-bit of the SCTLR does not control program flow prediction.
// Control bits in the ACTLR control the program flow and prefetch features instead.
// These are enabled by default, but are shown here for completeness.

        MRC     p15, 0, r0, c1, c0, 1       // Read ACTLR
        BIC     r0, r0, #(0x1 << 17)        // Clear RSDIS (bit 17) to re-enable the return stack
        BIC     r0, r0, #(0x1 << 16)        // BP[16:15] = 0b00:
        BIC     r0, r0, #(0x1 << 15)        //   normal operation, prediction from the global history table
        MCR     p15, 0, r0, c1, c0, 1       // Write ACTLR
        ISB

//----------------------------------------------------------------
// Enable MPU
//----------------------------------------------------------------

        MRC     p15, 0, r0, c1, c0, 0       // Read System Control Register (SCTLR)
        ORR     r0, r0, #0x01               // Set M bit to enable MPU
        DSB                                 // Ensure all previous loads/stores have completed
        MCR     p15, 0, r0, c1, c0, 0       // Write System Control Register
        ISB                                 // Ensure subsequent insts execute wrt new MPU settings

#endif
// Stand-alone I-cache enable sequence, currently compiled out.
#if 0
        MRC     p15, 0, R1, c1, c0, 0
        ORR R1, R1, #0x1 << 12
        DSB
        MCR p15, 0, R1, c1, c0, 0
        ISB
#endif

    // Enable both caches now that the MPU is programmed and enabled.
    // NOTE(review): unlike the disable sequence earlier there is no DSB
    // before this SCTLR write — confirm that is acceptable here.
    MRC     p15, 0, r0, c1, c0, 0       // Read System Control Register
    ORR     r0, r0, #(0x1 << 12)        // enable I cache (SCTLR.I)
    ORR     r0, r0, #(0x1 << 2)         // enable D cache (SCTLR.C)
    MCR     p15, 0, r0, c1, c0, 0       // Write System Control Register
    ISB

    //.extern init_f
    //bl init_f


    MOV     R1,#0                       // R1 = running byte offset used by the copy loops
    //b  loop_relocal_code
    // NOTE(review): with the branch above commented out, execution jumps
    // straight to .bss zeroing below; the relocal_code and CopyDataInit
    // loops are currently unreachable (dead code). Restore the branch to
    // re-enable code relocation and .data initialization.
    ldr  r2, =__bss_start__
    b  LoopFillZerobss

// Copy one word of relocatable code from __reloce_src to __reloce_start.
relocal_code:
      ldr  r3, =__reloce_src
      ldr  r3, [r3, r1]                 // r3 = word at source + offset
      str  r3, [r0, r1]                 // store at destination + offset
      adds  r1, r1, #4

// Loop test: keep copying until __reloce_start + offset reaches __reloce_end.
loop_relocal_code:
      ldr  r0, =__reloce_start
      ldr  r3, =__reloce_end
      adds  r2, r0, r1
      cmp  r2, r3
      bcc  relocal_code
      MOV      R1,#0                    // reset offset for the .data copy
      b  LoopCopyDataInit

// Copy one word of initialized data from _sidata (load address) to _sdata.
CopyDataInit:
      ldr  r3, =_sidata
      ldr  r3, [r3, r1]
      str  r3, [r0, r1]
      adds  r1, r1, #4

LoopCopyDataInit:
      ldr  r0, =_sdata
      ldr  r3, =_edata
      adds  r2, r0, r1
      cmp  r2, r3
      bcc  CopyDataInit
      ldr  r2, =__bss_start__
      b  LoopFillZerobss
// Zero-fill the .bss segment (r2 walks from __bss_start__ to __bss_end__).
// (Label name "FillZerobs" is a historical typo; it is used consistently.)
FillZerobs:
      movs  r3, #0
      str  r3, [r2], #4                 // *r2++ = 0

LoopFillZerobss:
      ldr  r3, = __bss_end__
      cmp  r2, r3
      bcc  FillZerobs
      // falls through into __user_initial_stackheap below when done
  
 .globl __user_initial_stackheap

// Reached by fall-through from the .bss zeroing loop above.
// Zeroes the general-purpose registers, then enters the C entry point.
// NOTE(review): despite the name, stack/heap setup happened earlier in
// __test_main; confirm the symbol name is still meaningful to any caller.
__user_initial_stackheap:


    // Init regs 0-3
    MOV     R0,#0
    MOV     R1,#0
    MOV     R2,#0
    MOV     R3,#0

    // Init regs 4-8
    MOV     R4,#0
    MOV     R5,#0
    MOV     R6,#0
    MOV     R7,#0
    MOV     R8,#0

    // Init regs 9-12 and LR (R13/SP keeps the SYS-mode stack set earlier)
    MOV     R9,#0
    MOV     R10,#0
    MOV     R11,#0
    MOV     R12,#0
    MOV     R14,#0

    //--------------------------------------------------
    // Call entry point to arm library
    // call __rt_entry rather than __main (calls __entry if __rt_entry
    // isn't defined) to avoid copying data from ROM to RAM, and to avoid
    // zero-init of RAM
    .extern arch_cpu_init

   .extern cpu0_main
   .extern cpu1_main
@ SMP dispatch (disabled): the commented code would branch by CPU ID;
@ currently every core boots through cpu0_main.
    //MRC     p15, 0, r0, c0, c0, 5     @ Read CPU ID register
    //ANDS    r0, r0, #0x03             @ Mask off, leaving the CPU ID field
    //BEQ     cpu0_main
    //BNE     cpu1_main
    B   cpu0_main                       @ does not return


//----------------------------------------------------------------
// Global Enable for Instruction and Data Caching
//----------------------------------------------------------------

    .global enable_caches

// void enable_caches(void)
// Sets SCTLR.I (bit 12) and SCTLR.C (bit 2) to turn on both caches.
// Clobbers: r0 and flags only — AAPCS-safe to call from C.
    .type enable_caches, "function"
    .cfi_startproc
enable_caches:

        MRC     p15, 0, r0, c1, c0, 0       // Read System Control Register (SCTLR)
        ORR     r0, r0, #(0x1 << 12)        // enable I cache (SCTLR.I)
        ORR     r0, r0, #(0x1 << 2)         // enable D cache (SCTLR.C)
        MCR     p15, 0, r0, c1, c0, 0       // Write System Control Register
        ISB                                 // flush pipeline so the new setting takes effect
        DSB

        BX    lr
    .cfi_endproc

    .size enable_caches, . - enable_caches
    
.end
