    .global trap_entry
    .global trap_end

    .section .text
// stvec requires the trap vector address to be 4-byte aligned
    .align 2

// sscratch stores tf_ptr now
//-----------------------------------------------------------------------
// trap_entry — S-mode trap vector.
// Saves the interrupted context into the trapframe whose address is
// held in sscratch, switches to the kernel stack, and calls the C
// handler trap(tf_ptr).
//
// Trapframe layout assumed here (8-byte slots; must match the C struct):
//    0: kernel stack pointer        8: sepc
//   16: ra   24: sp   32: gp   40: tp
//   48: t0 .. 64: t2  72: s0   80: s1
//   88: a0   96: a1 .. 144: a7
//  152: s2 .. 224: s11
//  232: t3 .. 256: t6
//
// Invariant: while the hart runs outside the handler, sscratch = tf_ptr.
trap_entry:
    // Swap a0 <-> sscratch: a0 = tf_ptr, sscratch = interrupted a0.
    // This frees a0 as the trapframe base without losing its old value.
    csrrw a0, sscratch, a0
    // Spill all general-purpose registers into the trapframe.
    sd ra, 16(a0)
    sd sp, 24(a0)
    sd gp, 32(a0)
    sd tp, 40(a0)
    sd t0, 48(a0)
    sd t1, 56(a0)
    sd t2, 64(a0)
    sd s0, 72(a0)
    sd s1, 80(a0)
    // Slot 88 (a0) is skipped for now: the interrupted a0 is parked in
    // sscratch and is stored below once t0 is free to use as a scratch.
    sd a1, 96(a0)
    sd a2, 104(a0)
    sd a3, 112(a0)
    sd a4, 120(a0)
    sd a5, 128(a0)
    sd a6, 136(a0)
    sd a7, 144(a0)
    sd s2, 152(a0)
    sd s3, 160(a0)
    sd s4, 168(a0)
    sd s5, 176(a0)
    sd s6, 184(a0)
    sd s7, 192(a0)
    sd s8, 200(a0)
    sd s9, 208(a0)
    sd s10, 216(a0)
    sd s11, 224(a0)
    sd t3, 232(a0)
    sd t4, 240(a0)
    sd t5, 248(a0)
    sd t6, 256(a0)
    // Recover the interrupted a0 from sscratch (t0 is already saved,
    // so it is safe to clobber) and store it in its trapframe slot.
    csrr t0, sscratch
    sd t0, 88(a0)
    // Save sepc (the interrupted pc) so the C handler can inspect or
    // advance it (e.g. past an ecall instruction).
    csrr t0, sepc
    sd t0, 8(a0)
    // Switch to the kernel stack; its top is stashed at trapframe+0.
    ld sp, 0(a0)
    // Call the C handler with a0 = tf_ptr (first argument per the
    // RISC-V calling convention). Execution falls through to trap_end
    // on return; this assumes trap() returns tf_ptr in a0 — TODO
    // confirm against the C definition of trap().
    call trap

// a0 stores tf_ptr now
//-----------------------------------------------------------------------
// trap_end — restore the context from the trapframe and return from
// the trap with sret.
// On entry: a0 = tf_ptr (either fallen through from trap_entry, or
// jumped to with a freshly prepared trapframe, e.g. on first entry
// into user mode).
//
// NOTE(review): sepc and sstatus are NOT written back here even though
// sepc was saved in trap_entry — presumably the C handler sets these
// CSRs before returning; confirm on the C side.
trap_end:
    // Park tf->a0 (e.g. a syscall return value) in sscratch so that
    // a0 can keep serving as the trapframe base until the very end.
    ld t0, 88(a0)
    csrw sscratch, t0
    // Reload all general-purpose registers from the trapframe.
    ld ra, 16(a0)
    ld sp, 24(a0)
    ld gp, 32(a0)
    ld tp, 40(a0)
    ld t0, 48(a0)
    ld t1, 56(a0)
    ld t2, 64(a0)
    ld s0, 72(a0)
    ld s1, 80(a0)
    // a0 itself is restored last (below): it still holds tf_ptr here.
    ld a1, 96(a0)
    ld a2, 104(a0)
    ld a3, 112(a0)
    ld a4, 120(a0)
    ld a5, 128(a0)
    ld a6, 136(a0)
    ld a7, 144(a0)
    ld s2, 152(a0)
    ld s3, 160(a0)
    ld s4, 168(a0)
    ld s5, 176(a0)
    ld s6, 184(a0)
    ld s7, 192(a0)
    ld s8, 200(a0)
    ld s9, 208(a0)
    ld s10, 216(a0)
    ld s11, 224(a0)
    ld t3, 232(a0)
    ld t4, 240(a0)
    ld t5, 248(a0)
    ld t6, 256(a0)
    // Swap a0 <-> sscratch one final time: a0 = tf->a0 (the value
    // parked above), and sscratch = tf_ptr again — re-establishing the
    // invariant required by trap_entry for the next trap.
    csrrw a0, sscratch, a0
    sret
