/* Copyright (c) 2025 Beijing Semidrive Technology Corporation
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/** *****************************************************************************************************
 * \file     Mcal_Cache.S                                                                                *
 * \brief    Cache operations.                                                                          *
 *                                                                                                      *
 * <table>                                                                                              *
 * <tr><th>Date           <th>Version                                                                   *
 * <tr><td>2023/07/15     <td>1.0.0                                                                     *
 * </table>                                                                                             *
 *******************************************************************************************************/
#include "Mcal_CacheAsm.h"
#include "Compiler.h"
#include "Part.h"

#define DCACHE      0x01
#define ICACHE      0x02

#define MCALLIB_CACHE_LINE   CACHE_SIZE

#if defined(__IASMARM__) || defined(__ICCARM__)
        SECTION .mcal_text_asm_mcallib:CODE(2)
        ARM
#elif defined(__ghs__) || defined(__ghs_asm)
        .section ".mcal_text_asm_mcallib"
#else
        .section ".text"
        .arm
#endif
/** *****************************************************************************************************
 * \brief This function provides a secure mechanism for enabling the i-cache or d-cache
 *
 * \verbatim
 * Syntax             : Mcal_ArchEnableCache
 *
 * Service ID[hex]    : None
 *
 * Sync/Async         : Synchronous
 *
 * Reentrancy         : Non Reentrant
 *
 * Parameters (in)    : flags - define the cache which should be enable.
 *
 * Parameters (inout) : None
 *
 * Parameters (out)   : None
 *
 * Return value       : None
 *
 * Description        : Provides a secure mechanism to enable the i-cache and d-cache; before the
 *                      d-cache or i-cache is enabled, its cache lines are invalidated.
 * \endverbatim
 * Traceability       : SWSR SWSR_LIB_003
 *******************************************************************************************************/
    ASM_PUBLIC Mcal_ArchEnableCache
/* void Mcal_ArchEnableCache(uint32 flags)
 *
 * Enables the d-cache (flags & DCACHE) and/or i-cache (flags & ICACHE) by
 * setting SCTLR.C / SCTLR.I (cp15 c1,c0,0), invalidating the corresponding
 * cache beforehand. Runs with IRQ/FIQ/async aborts masked; the caller's CPSR
 * is restored before return. Register roles: r7 = flags, r8 = saved CPSR.
 */
ASM_FUNCTION(Mcal_ArchEnableCache)
    stmfd   sp!, {r4-r12, lr}           // invalidate_cache trashes r0-r6, r9-r11

    mov     r7, r0                      // save flags

    mrs     r8, cpsr                    // save the old interrupt state
    cpsid   iaf                         // mask IRQ, FIQ and async aborts

.Ldcache_enable:
    tst     r7, #DCACHE                 // d-cache enable requested?
    beq     .Licache_enable
    mrc     p15, 0, r0, c1, c0, 0       // cr1 (SCTLR)
    tst     r0, #(1<<2)                 // is the dcache already enabled? (SCTLR.C)
    bne     .Licache_enable

    // invalidate L1 and L2
    // NOTE: trashes a bunch of registers, can't be spilling stuff to the stack
    bl      invalidate_cache

    mrc     p15, 0, r0, c1, c0, 0       // cr1
    orr     r0, r0, #(1<<2)             // SCTLR.C = 1
    mcr     p15, 0, r0, c1, c0, 0       // enable dcache

.Licache_enable:
    tst     r7, #ICACHE                 // i-cache enable requested?
    beq     .Ldone_enable

    mov     r0, #0
    mcr     p15, 0, r0, c7, c5, 0       // invalidate icache to PoU (ICIALLU)

    mrc     p15, 0, r0, c1, c0, 0       // cr1
    orr     r0, r0, #(1<<12)            // SCTLR.I = 1
    mcr     p15, 0, r0, c1, c0, 0       // enable icache

.Ldone_enable:
    isb                                 // make the SCTLR write (and ICIALLU) take effect
    msr     cpsr_cxsf, r8               // restore caller's interrupt state
    ldmfd   sp!, {r4-r12, pc}
ASM_FUNCTION_END(Mcal_ArchEnableCache)
/** *****************************************************************************************************
 * \brief This function provides a secure mechanism for disabling the i-cache or d-cache
 *
 * \verbatim
 * Syntax             : Mcal_ArchDisableCache
 *
 * Service ID[hex]    : None
 *
 * Sync/Async         : Synchronous
 *
 * Reentrancy         : Non Reentrant
 *
 * Parameters (in)    : flags - define the cache which should be disabled.
 *
 * Parameters (inout) : None
 *
 * Parameters (out)   : None
 *
 * Return value       : None
 *
 * Description        : Provides a secure mechanism to disable the i-cache and d-cache. When the
 *                      d-cache is disabled, all cache lines are cleaned and invalidated; after
 *                      the i-cache is disabled, the i-cache lines are invalidated.
 * \endverbatim
 * Traceability       : SWSR SWSR_LIB_004
 *******************************************************************************************************/
    ASM_PUBLIC Mcal_ArchDisableCache
/* void Mcal_ArchDisableCache(uint32 flags)
 *
 * Disables the d-cache (flags & DCACHE) and/or i-cache (flags & ICACHE).
 * The d-cache is cleaned+invalidated after SCTLR.C is cleared (or merely
 * invalidated if it was already off); the i-cache is invalidated on every
 * exit path. Runs with IRQ/FIQ/async aborts masked; the caller's CPSR is
 * restored before return. Register roles: r7 = flags, r8 = saved CPSR.
 */
ASM_FUNCTION(Mcal_ArchDisableCache)
    stmfd   sp!, {r4-r11, lr}           // cache routines trash r0-r6, r9-r11

    mov     r7, r0                      // save flags

    mrs     r8, cpsr                    // save the old interrupt state
    cpsid   iaf                         // mask IRQ, FIQ and async aborts

.Ldcache_disable:
    tst     r7, #DCACHE                 // d-cache disable requested?
    beq     .Licache_disable
    mrc     p15, 0, r0, c1, c0, 0       // cr1 (SCTLR)
    tst     r0, #(1<<2)                 // is the dcache already disabled? (SCTLR.C)
    beq     .Ldcache_already_disabled

    bic     r0, r0, #(1<<2)             // SCTLR.C = 0
    mcr     p15, 0, r0, c1, c0, 0       // disable dcache

    // clean and invalidate the dcache
    // NOTE: trashes a bunch of registers, can't be spilling stuff to the stack
    bl      clean_invalidate_cache

    b       .Licache_disable

.Ldcache_already_disabled:
    // make sure all of the caches are invalidated
    // NOTE: trashes a bunch of registers, can't be spilling stuff to the stack
    bl      invalidate_cache

.Licache_disable:
    tst     r7, #ICACHE                 // i-cache disable requested?
    beq     .Ldone_disable

    mrc     p15, 0, r0, c1, c0, 0       // cr1
    bic     r0, r0, #(1<<12)            // SCTLR.I = 0
    mcr     p15, 0, r0, c1, c0, 0       // disable icache

.Ldone_disable:
    // make sure the icache is always invalidated
    mov     r0, #0
    mcr     p15, 0, r0, c7, c5, 0       // invalidate icache to PoU (ICIALLU)
    dsb                                 // FIX: ensure the invalidation has completed
    isb                                 // FIX: flush the pipeline before returning
                                        // (mirrors the barrier in Mcal_ArchEnableCache)

    msr     cpsr_cxsf, r8               // restore caller's interrupt state
    ldmfd   sp!, {r4-r11, pc}
ASM_FUNCTION_END(Mcal_ArchDisableCache)

    ASM_PUBLIC clean_invalidate_cache
    /* clean & invalidate cache routine, trashes r0-r6, r9-r11
     *
     * Cleans and invalidates, by set/way, every data/unified cache level the
     * CLIDR reports below the Level of Coherency. This is the standard
     * ARMv7-A set/way maintenance loop; it must not spill to the stack and
     * the caller must treat r0-r6 and r9-r11 as clobbered.
     */
ASM_FUNCTION(clean_invalidate_cache)
    dmb                                 // complete outstanding memory accesses first
    MRC     p15, 1, R0, c0, c0, 1       // Read CLIDR
    ANDS    R3, R0, #0x7000000          // extract LoC field (bits [26:24])
    MOV     R3, R3, LSR #23             // Cache level value (naturally aligned, 2*LoC)
    BEQ     .Lfinished                  // LoC == 0: nothing to maintain
    MOV     R10, #0                     // R10 = 2 * current cache level
.Loop1:
    ADD     R2, R10, R10, LSR #1        // Work out 3xcachelevel
    MOV     R1, R0, LSR R2              // bottom 3 bits are the Cache type for this level
    AND     R1, R1, #7                  // get those 3 bits alone
    CMP     R1, #2
    BLT     .Lskip                      // no cache or only instruction cache at this level
    MCR     p15, 2, R10, c0, c0, 0      // write the Cache Size selection register (CSSELR)
    isb                                 // ISB to sync the change to the CacheSizeID reg
    MRC     p15, 1, R1, c0, c0, 0       // reads current Cache Size ID register (CCSIDR)
    AND     R2, R1, #0x7                // extract the line length field
    ADD     R2, R2, #4                  // add 4 for the line length offset (log2 16 bytes)
    LDR     R4, =0x3FF
    ANDS    R4, R4, R1, LSR #3          // R4 is the max number on the way size (right aligned)
    CLZ     R5, R4                      // R5 is the bit position of the way size increment
    LDR     R6, =0x00007FFF
    ANDS    R6, R6, R1, LSR #13         // R6 is the max number of the index size (right aligned)
.Loop2:
    MOV     R9, R4                      // R9 working copy of the max way size (right aligned)
.Loop3:
    ORR     R11, R10, R9, LSL R5        // factor in the way number and cache number into R11
    ORR     R11, R11, R6, LSL R2        // factor in the index number
    MCR     p15, 0, R11, c7, c14, 2     // clean & invalidate by set/way (DCCISW)
    SUBS    R9, R9, #1                  // decrement the way number
    BGE     .Loop3
    SUBS    R6, R6, #1                  // decrement the index
    BGE     .Loop2
.Lskip:
    ADD     R10, R10, #2                    // increment the cache number
    CMP     R3, R10
    BGT     .Loop1

.Lfinished:
    mov     r10, #0
    mcr     p15, 2, r10, c0, c0, 0      // restore CSSELR: select cache level 0
    dsb                                 // ensure all maintenance ops have completed
    isb

    bx      lr
ASM_FUNCTION_END(clean_invalidate_cache)

    ASM_PUBLIC invalidate_cache
    /* invalidate cache routine, trashes r0-r6, r9-r11
     *
     * Invalidates (without cleaning), by set/way, every data/unified cache
     * level the CLIDR reports below the Level of Coherency. Identical loop
     * structure to clean_invalidate_cache, but uses DCISW, so any dirty
     * data is discarded. Must not spill to the stack; the caller must treat
     * r0-r6 and r9-r11 as clobbered.
     */
ASM_FUNCTION(invalidate_cache)
    dmb                                 // complete outstanding memory accesses first
    MRC     p15, 1, R0, c0, c0, 1       // Read CLIDR
    ANDS    R3, R0, #0x7000000          // extract LoC field (bits [26:24])
    MOV     R3, R3, LSR #23             // Cache level value (naturally aligned, 2*LoC)
    BEQ     .Lfinished_invalidate       // LoC == 0: nothing to maintain
    MOV     R10, #0                     // R10 = 2 * current cache level
.Loop1_invalidate:
    ADD     R2, R10, R10, LSR #1        // Work out 3xcachelevel
    MOV     R1, R0, LSR R2              // bottom 3 bits are the Cache type for this level
    AND     R1, R1, #7                  // get those 3 bits alone
    CMP     R1, #2
    BLT     .Lskip_invalidate           // no cache or only instruction cache at this level
    MCR     p15, 2, R10, c0, c0, 0      // write the Cache Size selection register (CSSELR)
    isb                                 // ISB to sync the change to the CacheSizeID reg
    MRC     p15, 1, R1, c0, c0, 0       // reads current Cache Size ID register (CCSIDR)
    AND     R2, R1, #0x7                // extract the line length field
    ADD     R2, R2, #4                  // add 4 for the line length offset (log2 16 bytes)
    LDR     R4, =0x3FF
    ANDS    R4, R4, R1, LSR #3          // R4 is the max number on the way size (right aligned)
    CLZ     R5, R4                      // R5 is the bit position of the way size increment
    LDR     R6, =0x00007FFF
    ANDS    R6, R6, R1, LSR #13         // R6 is the max number of the index size (right aligned)
.Loop2_invalidate:
    MOV     R9, R4                      // R9 working copy of the max way size (right aligned)
.Loop3_invalidate:
    ORR     R11, R10, R9, LSL R5        // factor in the way number and cache number into R11
    ORR     R11, R11, R6, LSL R2        // factor in the index number
    MCR     p15, 0, R11, c7, c6, 2      // invalidate by set/way (DCISW)
    SUBS    R9, R9, #1                  // decrement the way number
    BGE     .Loop3_invalidate
    SUBS    R6, R6, #1                  // decrement the index
    BGE     .Loop2_invalidate
.Lskip_invalidate:
    ADD     R10, R10, #2                // increment the cache number
    CMP     R3, R10
    BGT     .Loop1_invalidate

.Lfinished_invalidate:
    dsb                                 // ensure all maintenance ops have completed
    mov     r10, #0
    mcr     p15, 2, r10, c0, c0, 0      // restore CSSELR: select cache level 0
    isb

    bx      lr
ASM_FUNCTION_END(invalidate_cache)

    ASM_PUBLIC Mcal_ArchRdSctlr
    /* uint32 Mcal_ArchRdSctlr(void);
     * Returns the current System Control Register (cp15 c1, c0, 0) in r0. */
ASM_FUNCTION(Mcal_ArchRdSctlr)
    mrc     p15, 0, r0, c1, c0, 0       // r0 = SCTLR
    bx      lr                          // return value already in r0
ASM_FUNCTION_END(Mcal_ArchRdSctlr)

    ASM_PUBLIC Mcal_CleanCacheRange
/* void Mcal_CleanCacheRange(unsigned long start, uint32 len);
 *
 * Cleans (writes back) every d-cache line overlapping [start, start+len)
 * to the Point of Coherency. A zero-length request is a no-op.
 * In: r0 = start address, r1 = length in bytes. Clobbers r0, r2, flags.
 */
ASM_FUNCTION(Mcal_CleanCacheRange)
    cmp     r1, #0                      // FIX: len == 0 is a no-op (the old
    bxeq    lr                          //      do-while always touched one line)
    add     r2, r0, r1                  // r2 = one past the last byte
    bic     r0, r0, #(MCALLIB_CACHE_LINE-1)     // align the start with a cache line
.LMcal_CleanLoop:                       // local label, matching file convention
    mcr     p15, 0, r0, c7, c10, 1      // clean dcache line to PoC by MVA (DCCMVAC)
    add     r0, r0, #MCALLIB_CACHE_LINE
    cmp     r0, r2
    blo     .LMcal_CleanLoop

    dsb                                 // wait for the clean to complete
    bx      lr
ASM_FUNCTION_END(Mcal_CleanCacheRange)

    ASM_PUBLIC Mcal_CleanInvalidateCacheRange
/* void Mcal_CleanInvalidateCacheRange(unsigned long start, uint32 len);
 *
 * Cleans and invalidates every d-cache line overlapping [start, start+len)
 * to the Point of Coherency. A zero-length request is a no-op.
 * In: r0 = start address, r1 = length in bytes. Clobbers r0, r2, flags.
 */
ASM_FUNCTION(Mcal_CleanInvalidateCacheRange)
    cmp     r1, #0                      // FIX: len == 0 is a no-op (the old
    bxeq    lr                          //      do-while always touched one line)
    add     r2, r0, r1                  // r2 = one past the last byte
    bic     r0, r0, #(MCALLIB_CACHE_LINE-1)     // align the start with a cache line
.LMcal_FlushLoop:                       // local label, matching file convention
    mcr     p15, 0, r0, c7, c14, 1      // clean & invalidate dcache to PoC by MVA (DCCIMVAC)
    add     r0, r0, #MCALLIB_CACHE_LINE
    cmp     r0, r2
    blo     .LMcal_FlushLoop
    dsb                                 // wait for the maintenance to complete
    bx      lr
ASM_FUNCTION_END(Mcal_CleanInvalidateCacheRange)

    ASM_PUBLIC Mcal_InvalidateCacheRange
/* void Mcal_InvalidateCacheRange(unsigned long start, uint32 len);
 *
 * Invalidates (discards, without cleaning) every d-cache line overlapping
 * [start, start+len) to the Point of Coherency. A zero-length request is a
 * no-op — important here, because the old do-while would invalidate one
 * line even for len == 0, potentially discarding valid dirty data.
 * NOTE(review): if start/len are not cache-line aligned, the partial lines
 * at either end are invalidated whole, dropping any dirty data they share
 * with adjacent buffers — callers should pass line-aligned ranges; confirm.
 * In: r0 = start address, r1 = length in bytes. Clobbers r0, r2, flags.
 */
ASM_FUNCTION(Mcal_InvalidateCacheRange)
    cmp     r1, #0                      // FIX: len == 0 must not invalidate anything
    bxeq    lr
    add     r2, r0, r1                  // r2 = one past the last byte
    bic     r0, r0, #(MCALLIB_CACHE_LINE-1)     // align the start with a cache line
.LMcal_InvLoop:                         // local label, matching file convention
    mcr     p15, 0, r0, c7, c6, 1       // invalidate dcache to PoC by MVA (DCIMVAC)
    add     r0, r0, #MCALLIB_CACHE_LINE
    cmp     r0, r2
    blo     .LMcal_InvLoop
    dsb                                 // wait for the invalidation to complete
    bx      lr
ASM_FUNCTION_END(Mcal_InvalidateCacheRange)

#if defined(__IASMARM__) || defined(__ICCARM__)
    END
#endif /** #if defined(__IASMARM__) || defined(__ICCARM__) */
/* End of file */
