// Copyright (c) 2020 Institute of Parallel And Distributed Systems (IPADS), Shanghai Jiao Tong University (SJTU)
// OS-Lab-2020 (i.e., ChCore) is licensed under the Mulan PSL v1.
// You can use this software according to the terms and conditions of the Mulan PSL v1.
// You may obtain a copy of Mulan PSL v1 at: http://license.coscl.org.cn/MulanPSL
// THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
// PURPOSE.
// See the Mulan PSL v1 for more details.

// Import common types
// Assuming u64 from common::types

// use core::borrow::BorrowMut;
use crate::common::{panic, types::size_t};
use crate::mm::mm::PAGE_SIZE;
use crate::BUG_ON;
use core::borrow::BorrowMut;

use crate::common::{
    mmu::VmrProp,
    types::{paddr_t, vaddr_t},
};

use bitflags::bitflags;
// use core::ptr;
use core::ptr;

// Constants
/* Shareability (SH) field value: inner shareable. */
pub const INNER_SHAREABLE: u64 = 0x3;
/* Please search mair_el1 for these memory types. */
/* Index into MAIR_EL1 for normal (cacheable) memory. */
pub const NORMAL_MEMORY: u64 = 0x4;
/* Index into MAIR_EL1 for device memory. */
pub const DEVICE_MEMORY: u64 = 0x0;

/* Description bits in page table entries. */

/* Read-write permission. */
pub const AARCH64_PTE_AP_HIGH_RW_EL0_RW: u64 = 0b01;
pub const AARCH64_PTE_AP_HIGH_RO_EL0_RO: u64 = 0b11;
/* X: execution permission. U: unprivileged. P: privileged. */
pub const AARCH64_PTE_UX: u64 = 0b0;
pub const AARCH64_PTE_UXN: u64 = 0b1;
pub const AARCH64_PTE_PXN: u64 = 0b1;
/* Access flag bit. */
pub const AARCH64_PTE_AF_ACCESSED: u64 = 0b1;
/* Present (valid) bit. */
pub const AARCH64_PTE_INVALID_MASK: u64 = 0b1;
/* Table bit: whether the next level is another pte or physical memory page. */
pub const AARCH64_PTE_TABLE_MASK: u64 = 0b1;
// Macros
/// True when the entry's valid bit (tested via `AARCH64_PTE_INVALID_MASK`)
/// is clear, i.e. the PTE does not describe a present mapping.
#[macro_export]
macro_rules! IS_PTE_INVALID {
    ($pte:expr) => {
        ($pte & AARCH64_PTE_INVALID_MASK) == 0
    };
}
/// True when the entry's table bit (tested via `AARCH64_PTE_TABLE_MASK`)
/// is set, i.e. the PTE points at a next-level page table rather than a
/// block/page of physical memory.
#[macro_export]
macro_rules! IS_PTE_TABLE {
    ($pte:expr) => {
        ($pte & AARCH64_PTE_TABLE_MASK) != 0
    };
}
/* PAGE_SIZE (4k) == (1 << (PAGE_SHIFT)) */
pub const PAGE_SHIFT: u64 = 12;
//pub const PAGE_SIZE: u64 = 0x1000;
/* Mask for the within-page offset of an address (low 12 bits). */
pub const PAGE_MASK: u64 = PAGE_SIZE as u64 - 1;
/* Each page-table level translates 9 bits of the virtual address. */
pub const PAGE_ORDER: u64 = 9;

/* Mask for a 9-bit index into one page-table page. */
pub const PTP_INDEX_MASK: u64 = (1 << PAGE_ORDER) - 1;
/* Shift amounts that bring each level's VA index field down to bit 0. */
pub const L0_INDEX_SHIFT: u64 = (3 * PAGE_ORDER) + PAGE_SHIFT;
pub const L1_INDEX_SHIFT: u64 = (2 * PAGE_ORDER) + PAGE_SHIFT;
pub const L2_INDEX_SHIFT: u64 = (1 * PAGE_ORDER) + PAGE_SHIFT;
pub const L3_INDEX_SHIFT: u64 = (0 * PAGE_ORDER) + PAGE_SHIFT;
/// Level-0 index of `$addr`: VA bits [47:39], converted to `usize`.
/// Panics (via `unwrap`) only if the conversion to the target type fails.
#[macro_export]
macro_rules! get_l0_index {
    ($addr:expr) => {
        (($addr >> L0_INDEX_SHIFT) & PTP_INDEX_MASK)
            .try_into()
            .unwrap()
    };
}
/// Level-1 index of `$addr`: VA bits [38:30], converted to `usize`.
#[macro_export]
macro_rules! get_l1_index {
    ($addr:expr) => {
        (($addr >> L1_INDEX_SHIFT) & PTP_INDEX_MASK)
            .try_into()
            .unwrap()
    };
}
/// Level-2 index of `$addr`: VA bits [29:21], converted to `usize`.
#[macro_export]
macro_rules! get_l2_index {
    ($addr:expr) => {
        (($addr >> L2_INDEX_SHIFT) & PTP_INDEX_MASK)
            .try_into()
            .unwrap()
    };
}
/// Level-3 index of `$addr`: VA bits [20:12], converted to `usize`.
#[macro_export]
macro_rules! get_l3_index {
    ($addr:expr) => {
        (($addr >> L3_INDEX_SHIFT) & PTP_INDEX_MASK)
            .try_into()
            .unwrap()
    };
}

/* Number of entries in one page-table page: 2^9 = 512. */
pub const PTP_ENTRIES: u64 = 1 << PAGE_ORDER;
/* Number of 4KB-pages that an Lx-block describes */
pub const L0_PER_ENTRY_PAGES: u64 = PTP_ENTRIES * L1_PER_ENTRY_PAGES;
pub const L1_PER_ENTRY_PAGES: u64 = PTP_ENTRIES * L2_PER_ENTRY_PAGES;
pub const L2_PER_ENTRY_PAGES: u64 = PTP_ENTRIES * L3_PER_ENTRY_PAGES;
pub const L3_PER_ENTRY_PAGES: u64 = 1;

/* Bitmask used by GET_VA_OFFSET_Lx */
/* Offset-within-block masks: (pages-per-entry << 12) - 1. */
pub const L1_BLOCK_MASK: u64 = (L1_PER_ENTRY_PAGES << PAGE_SHIFT) - 1;
pub const L2_BLOCK_MASK: u64 = (L2_PER_ENTRY_PAGES << PAGE_SHIFT) - 1;
pub const L3_PAGE_MASK: u64 = (L3_PER_ENTRY_PAGES << PAGE_SHIFT) - 1;
/// Offset of `$va` inside the 1 GiB (512*512 pages) region of an L1 block.
#[macro_export]
macro_rules! get_va_offset_l1 {
    ($va:expr) => {
        $va & L1_BLOCK_MASK
    };
}
/// Offset of `$va` inside the 2 MiB (512 pages) region of an L2 block.
#[macro_export]
macro_rules! get_va_offset_l2 {
    ($va:expr) => {
        $va & L2_BLOCK_MASK
    };
}
/// Offset of `$va` inside a 4 KiB L3 page (low 12 bits).
#[macro_export]
macro_rules! get_va_offset_l3 {
    ($va:expr) => {
        $va & L3_PAGE_MASK
    };
}

//u64 0b0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000;

/* table format */
bitflags! {
    // NOTE(review): every layout below is written MSB-first — the leftmost
    // digit of each 64-digit literal is bit 63. That is the mirror image of
    // the conventional AArch64 VMSAv8-64 descriptor layout (valid bit at
    // bit 0), and matches the `63 - start_bit` convention used by
    // update_pte_flags_left. GET_PADDR_IN_PTE!, however, reads the address
    // field at conventional (LSB-first) positions [47:12] — confirm the two
    // representations are reconciled before relying on either.

    // Table descriptor: an entry that points to a next-level page table.
    pub struct TableFlags: u64 {
        const IS_VALID = 0b1000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000;
        const IS_TABLE = 0b0100_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000;
        const IGNORE_D1 = 0b0011_1111_1111_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000;
        const NEXT_TABLE_ADDR = 0b0000_0000_0000_1111_1111_1111_1111_1111_1111_1111_1111_1111_0000_0000_0000_0000;
        const RESERVED = 0b0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_1111_0000_0000_0000;
        const IGNORE_D2 = 0b0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_1111_1110_0000;
        const PXN_TABLE = 0b0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0001_0000;
        const XN_TABLE = 0b0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_1000;
        const AP_TABLE = 0b0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0110;
        const NS_TABLE = 0b0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0001;
    }

    // Level-1 block descriptor (large mapping installed at L1).
    pub struct L1BlockFlags: u64 {
        const IS_VALID = 0b1000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000;
        const IS_TABLE = 0b0100_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000;
        const ATTR_INDEX = 0b0011_1000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000;
        const NS = 0b0000_0100_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000;
        const AP = 0b0000_0011_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000;
        const SH = 0b0000_0000_1100_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000;
        const AF = 0b0000_0000_0010_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000;
        const NG = 0b0000_0000_0001_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000;
        const RESERVE_D1 = 0b0000_0000_0000_1111_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000;
        const NT = 0b0000_0000_0000_0000_1000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000;
        const RESERVE_D2 = 0b0000_0000_0000_0000_0111_1111_1111_1100_0000_0000_0000_0000_0000_0000_0000_0000;
        const PFN = 0b0000_0000_0000_0000_0000_0000_0000_0011_1111_1111_1111_1111_0000_0000_0000_0000;
        const RESERVE_D3 = 0b0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_1100_0000_0000_0000;
        const GP = 0b0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0010_0000_0000_0000;
        const RESERVE_D4 = 0b0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0001_0000_0000_0000;
        const DBM = 0b0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_1000_0000_0000;
        const CONTIGUOUS = 0b0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0100_0000_0000;
        const PXN = 0b0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0010_0000_0000;
        const UXN = 0b0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0001_0000_0000;
        const SOFT_RESERVED = 0b0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_1111_0000;
        const PBHA = 0b0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_1111;
    }

    // Level-2 block descriptor (large mapping installed at L2);
    // identical to L1 except the PFN field is wider.
    pub struct L2BlockFlags: u64 {
        const IS_VALID = 0b1000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000;
        const IS_TABLE = 0b0100_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000;
        const ATTR_INDEX = 0b0011_1000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000;
        const NS = 0b0000_0100_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000;
        const AP = 0b0000_0011_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000;
        const SH = 0b0000_0000_1100_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000;
        const AF = 0b0000_0000_0010_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000;
        const NG = 0b0000_0000_0001_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000;
        const RESERVE_D1 = 0b0000_0000_0000_1111_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000;
        const NT = 0b0000_0000_0000_0000_1000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000;
        const RESERVE_D2 = 0b0000_0000_0000_0000_0111_1000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000;
        const PFN = 0b0000_0000_0000_0000_0000_0111_1111_1111_1111_1111_1111_1111_0000_0000_0000_0000;
        const RESERVE_D3 = 0b0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_1100_0000_0000_0000;
        const GP = 0b0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0010_0000_0000_0000;
        const RESERVE_D4 = 0b0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0001_0000_0000_0000;
        const DBM = 0b0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_1000_0000_0000;
        const CONTIGUOUS = 0b0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0100_0000_0000;
        const PXN = 0b0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0010_0000_0000;
        const UXN = 0b0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0001_0000_0000;
        const SOFT_RESERVED = 0b0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_1111_0000;
        const PBHA = 0b0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_1111;
    }

    // Level-3 page descriptor (4 KiB page mapping).
    pub struct L3PageFlags: u64 {
        const IS_VALID = 0b1000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000;
        const IS_PAGE = 0b0100_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000;
        const ATTR_INDEX = 0b0011_1000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000;
        const NS = 0b0000_0100_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000;
        const AP = 0b0000_0011_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000;
        const SH = 0b0000_0000_1100_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000;
        const AF = 0b0000_0000_0010_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000;
        const NG = 0b0000_0000_0001_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000;
        const PFN = 0b0000_0000_0000_1111_1111_1111_1111_1111_1111_1111_1111_1111_0000_0000_0000_0000;
        const RESERVED = 0b0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_1110_0000_0000_0000;
        const DBM = 0b0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0001_0000_0000_0000;
        const CONTIGUOUS = 0b0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_1000_0000_0000;
        const PXN = 0b0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0100_0000_0000;
        const UXN = 0b0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0010_0000_0000;
        const SOFT_RESERVED = 0b0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0001_1110_0000;
        const PBHA = 0b0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0001_1110;
        const IGNORED = 0b0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0001;
    }

}

// PteT enum
/// A single page-table entry, tagged by how its bits are being interpreted.
#[derive(Debug)]
pub enum PteT {
    /// Entry that points to a next-level page table.
    Table(TableFlags),
    /// Block (large) mapping installed at level 1.
    L1Block(L1BlockFlags),
    /// Block (large) mapping installed at level 2.
    L2Block(L2BlockFlags),
    /// 4 KiB page mapping installed at level 3.
    L3Page(L3PageFlags),
    /// Raw, untyped entry bits.
    Pte(u64),
}

impl L3PageFlags {
    /// Overwrite a `length`-bit field of the raw descriptor bits.
    ///
    /// `start_bit` is counted from the MSB side: the field's lowest bit
    /// lands at raw bit `63 - start_bit`. Bits outside the field are
    /// preserved; `value` is masked to the field width. `length` is
    /// assumed to be in 1..=64 (a zero length over-shifts).
    pub fn update_pte_flags_left(&mut self, start_bit: usize, length: usize, value: u64) {
        let shift = 63 - start_bit;
        let field_mask = (!0u64 >> (64 - length)) << shift;
        // Clear the target bits, then merge in the (masked) new value.
        self.bits = (self.bits & !field_mask) | ((value << shift) & field_mask);
    }
}

impl TableFlags {
    /// Overwrite a `length`-bit field of the raw descriptor bits.
    ///
    /// Mirrors `L3PageFlags::update_pte_flags_left`: `start_bit` counts
    /// from the MSB side, so the field's lowest bit lands at raw bit
    /// `63 - start_bit`; surrounding bits are preserved and `value` is
    /// masked to the field width. `length` is assumed to be in 1..=64.
    pub fn update_pte_flags_left(&mut self, start_bit: usize, length: usize, value: u64) {
        let shift = 63 - start_bit;
        let field_mask = (!0u64 >> (64 - length)) << shift;
        // Clear the target bits, then merge in the (masked) new value.
        self.bits = (self.bits & !field_mask) | ((value << shift) & field_mask);
    }
}

// PtpT struct
/// One page-table page: an array of PTP_ENTRIES (512) entries.
#[derive(Debug)]
pub struct PtpT {
    pub ent: [PteT; PTP_ENTRIES as usize],
}

// page_table.c

use crate::common::kprint::*;
use crate::common::mmu::*;
use crate::common::printk::*;
use crate::common::vars::*;
use crate::common::{errno::*, kprint};

use crate::{kdebug, kwarn};

// Define the PteT enum and TableFlags, L1BlockFlags, L2BlockFlags, L3PageFlags structs here (unchanged)

// Set of constants
/* Kind of mapping a PTE belongs to: user (EL0-reachable) or kernel. */
pub const USER_PTE: i32 = 0;
pub const KERNEL_PTE: i32 = 1;
/* get_next_ptp results: entry leads to a page table vs. a block/page. */
pub const NORMAL_PTP: i32 = 0;
pub const BLOCK_PTP: i32 = 1;

// Helper function to convert C-style indices to Rust-style indices
pub fn get_rust_index(level: u32, va: vaddr_t) -> usize {
    match level {
        0 => get_l0_index!(va),
        1 => get_l1_index!(va),
        2 => get_l2_index!(va),
        3 => get_l3_index!(va),
        _ => panic!("Invalid level: {}", level),
    }
}

// Function to set page table
/// Install `pgtbl` (physical address of the root page-table page) as the
/// active translation table by writing TTBR0_EL1.
fn set_page_table(pgtbl: paddr_t) {
    set_ttbr0_el1(pgtbl);
}

// Function to set page table entry flags
/// Apply VMR permission flags to a page-table entry.
///
/// * `entry` - the PTE to modify; only the `L3Page` variant is touched,
///   any other variant is left unchanged.
/// * `flags` - VMR_* permission bits (VMR_WRITE / VMR_EXEC).
/// * `kind`  - `USER_PTE` or `KERNEL_PTE`.
///
/// Always returns 0.
pub fn set_pte_flags(entry: &mut PteT, flags: VmrProp, kind: i32) -> i32 {
    if let PteT::L3Page(ref mut table_flags) = entry {
        // Access permissions (AP field): RW when writable, RO otherwise.
        // BUG FIX: an exact duplicate of this if/else followed it and set
        // the very same bits a second time; the copy has been removed.
        if flags & VMR_WRITE != 0 {
            table_flags.update_pte_flags_left(7, 2, AARCH64_PTE_AP_HIGH_RW_EL0_RW);
        } else {
            table_flags.update_pte_flags_left(7, 2, AARCH64_PTE_AP_HIGH_RO_EL0_RO);
        }
        // Unprivileged execute permission (UXN set when not executable).
        if flags & VMR_EXEC != 0 {
            table_flags.update_pte_flags_left(55, 1, AARCH64_PTE_UX);
        } else {
            table_flags.update_pte_flags_left(55, 1, AARCH64_PTE_UXN);
        }
        // EL1 cannot directly execute EL0 accessiable region.
        if kind == USER_PTE {
            table_flags.update_pte_flags_left(54, 1, AARCH64_PTE_PXN);
        }
        // Access flag: pre-set so the mapping takes no access-flag fault.
        table_flags.update_pte_flags_left(11, 1, AARCH64_PTE_AF_ACCESSED);
        // inner sharable
        table_flags.update_pte_flags_left(9, 2, INNER_SHAREABLE);
        // memory type
        table_flags.update_pte_flags_left(3, 3, NORMAL_MEMORY);
    }

    0
}

// Get the physical address of the next-level page table stored in a PTE.
// Yields 0 for every variant other than `Table`.
#[macro_export]
macro_rules! GET_PADDR_IN_PTE {
    ($entry:expr) => {
        unsafe {
            match *$entry {
                PteT::Table(ref table) => {
                    // Fetch next_table_addr: raw bits [47:12] shifted back
                    // up to a byte address.
                    // NOTE(review): this reads the field at conventional
                    // AArch64 (LSB-first) positions, while the bitflags and
                    // update_pte_flags_left code use an MSB-first layout —
                    // confirm the two representations agree.
                    let mask = (1u64 << 36) - 1;
                    ((table.bits >> 12) & mask) << PAGE_SHIFT
                }
                PteT::L1Block(_) => 0,
                PteT::L2Block(_) => 0,
                PteT::L3Page(_) => 0,
                PteT::Pte(_) => 0,
            }
        }
    };
}

// Get the kernel virtual address of the next-level page table page
// referenced by `$entry` (physical address translated via phys_to_virt).
#[macro_export]
macro_rules! GET_NEXT_PTP {
    ($entry:expr) => {
        phys_to_virt(GET_PADDR_IN_PTE!($entry))
    };
}

/*
 * Find next page table page for the "va".
 *
 * cur_ptp: current page table page
 * level:   current ptp level
 *
 * next_ptp: returns "next_ptp"
 * pte     : returns "pte" (points to next_ptp) in "cur_ptp"
 *
 * alloc: if true, allocate a ptp when missing
 *
 */
// Function to get the next page table page
pub fn get_next_ptp(
    cur_ptp: *mut PtpT,
    level: u32,
    va: vaddr_t,
    next_ptp: *mut *mut PtpT,
    pte: *mut *mut PteT,
    alloc: bool,
) -> i32 {
    let index = 0;
    let entry: *mut PteT;
    let entry_pte: &mut u64;
    if cur_ptp.is_null() {
        return -ENOMAPPING;
    }
    get_rust_index(level, va);

    unsafe {
        entry = (*cur_ptp).ent[index].borrow_mut();
        if let PteT::Pte(entry_pte) = (*entry).borrow_mut() {
            if (IS_PTE_INVALID!(entry_pte.clone())) {
                if alloc == false {
                    return -ENOMAPPING;
                } else {
                    /* alloc a new page table page */
                    let new_ptp: *mut PteT;
                    let mut new_ptp_paddr: paddr_t;
                    let mut new_pte_val: PteT;
                    // binding value initiating
                    new_pte_val = PteT::Pte(0);
                    /* alloc a single physical page as a new page table page */
                    new_ptp = get_pages(0);
                    BUG_ON!(new_ptp.is_null());
                    unsafe {
                        ptr::write_bytes(new_ptp, 0u8, PAGE_SIZE as usize);
                    }
                    new_ptp_paddr = virt_to_phys(new_ptp as vaddr_t);
                    if let PteT::Pte(ref mut new_pte) = new_pte_val {
                        *new_pte = 0;
                    }
                    if let PteT::Table(ref mut new_table) = new_pte_val {
                        new_table.update_pte_flags_left(0, 1, 0b1);
                        new_table.update_pte_flags_left(1, 1, 0b1);
                        new_table.update_pte_flags_left(13, 36, new_ptp_paddr >> PAGE_SHIFT);
                    }
                    /* same effect as: cur_ptp->ent[index] = new_pte_val; */
                    unsafe {
                        *entry_pte = 0;
                    }
                }
            }
            unsafe {
                let address = GET_NEXT_PTP!(entry);
                //ptr::write_unaligned(*next_ptp, ptr::read_unaligned(address as *const u64 as *const PtpT));
                *next_ptp = address as *mut PtpT;
                *pte = entry;
            }
            if (IS_PTE_TABLE!(entry_pte.clone())) {
                return NORMAL_PTP;
            } else {
                return BLOCK_PTP;
            }
        } else {
            return -1;
        }
    }
    /*
    let entry_pte: &mut PteT = match unsafe { &mut *entry } {
        PteT::Pte(ref mut entry_pte) => entry_pte,
        _ => panic!("Unexpected PteT variant encountered!"), // Handle other variants if needed
    };
    */
}

/*
 * Translate a va to pa, and get its pte for the flags
 */
/*
 * query_in_pgtbl: translate virtual address to physical
 * address and return the corresponding page table entry
 *
 * pgtbl @ ptr for the first level page table(pgd) virtual address
 * va @ query virtual address
 * pa @ return physical address
 * entry @ return page table entry
 *
 * Hint: check the return value of get_next_ptp, if ret == BLOCK_PTP
 * return the pa and block entry immediately
 */
// Function to translate virtual address to physical address and return the corresponding page table entry
pub fn query_in_pgtbl(
    pgtbl: *mut vaddr_t,
    va: vaddr_t,
    pa: *mut paddr_t,
    entry: *mut *mut PteT,
) -> i32 {
    // Implement query_in_pgtbl logic...
    let mut cur_ptp: *mut PtpT = pgtbl as *mut PtpT;
    let mut next_ptp: *mut PtpT = core::ptr::null_mut();
    let mut next_pte: *mut PteT = core::ptr::null_mut();
    /*
    unsafe{
        ptr::write_unaligned(cur_ptp, ptr::read_unaligned(pgtbl as *const u64 as *const PtpT));
    }*/
    let mut level = 0;
    let mut err = 0;
    // get page pte
    while level <= 3 {
        err = get_next_ptp(
            cur_ptp,
            level,
            va,
            next_ptp.borrow_mut(),
            next_pte.borrow_mut(),
            false,
        );
        if err == NORMAL_PTP {
            cur_ptp = next_ptp;
            level += 1;
        }
    }

    if err == NORMAL_PTP {
        // TODO: add hugepage support
        if level != 4 {
            kwarn!("query_in_pgtbl: level = {} < 4\n", level);
            return -ENOMAPPING;
        }
        // page invalid
        if true {
            return -ENOMAPPING;
        }
        // get phys addr
        unsafe {
            *pa = virt_to_phys(next_pte as vaddr_t) + get_va_offset_l3!(va);
        }
        return 0;
    }
    // get an error
    else if err < 0 {
        return err;
    }
    // should never be here
    else {
        kwarn!("query_in_pgtbl: should never be here\n");
        return err;
    }
    // </lab2>
    0
}

/*
 * map_range_in_pgtbl: map the virtual address [va:va+size] to
 * physical address[pa:pa+size] in given pgtbl
 *
 * pgtbl @ ptr for the first level page table(pgd) virtual address
 * va @ start virtual address
 * pa @ start physical address
 * len @ mapping size
 * flags @ corresponding attribution bit
 *
 * Hint: In this function you should first invoke the get_next_ptp()
 * to get the each level page table entries. Read type pte_t carefully
 * and it is convenient for you to call set_pte_flags to set the page
 * permission bit. Don't forget to call flush_tlb at the end of this function
 */

// Function to map the virtual address [va:va+len) to physical address [pa:pa+len) in the given page table
/// Map `[va, va+len)` to `[pa, pa+len)` as 4 KiB pages, then flush the TLB.
///
/// * `pgtbl` - virtual address of the root (L0) page table page
/// * `flags` - VMR_* permission bits applied to each L3 entry
///
/// Always returns 0.
pub fn map_range_in_pgtbl(
    pgtbl: *mut vaddr_t,
    va: vaddr_t,
    mut pa: paddr_t,
    len: size_t,
    flags: VmrProp,
) -> i32 {
    // One 4 KiB page per iteration; `v` is the page's virtual address.
    for v in (va..va + len).step_by(PAGE_SIZE as usize) {
        let mut cur_ptp = pgtbl as *mut PtpT;
        let mut next_ptp: *mut PtpT = core::ptr::null_mut();
        let mut next_pte: *mut PteT = core::ptr::null_mut();
        let mut level = 0;
        let mut err = 0;
        // Walk (and create, alloc=true) L0..L2 so cur_ptp ends at the L3 table.
        // BUG FIX: the walk previously passed `va` (the range start) instead
        // of the per-iteration address `v`, so every page resolved through
        // the same page-table path.
        while level <= 2 {
            err = get_next_ptp(
                cur_ptp,
                level,
                v,
                next_ptp.borrow_mut(),
                next_pte.borrow_mut(),
                true,
            );
            if err != NORMAL_PTP {
                break;
            }
            cur_ptp = next_ptp;
            level += 1;
        }
        // NOTE(review): `err` is not propagated; a failed walk still falls
        // through and indexes whatever cur_ptp points at. TODO: bail out.
        // BUG FIX: index the L3 table with `v`, not `va`.
        let index: usize = get_l3_index!(v);
        unsafe {
            let mut pte = &mut (*cur_ptp).ent[index];
            let next_pte = &mut pte;
            // NOTE(review): only entries already in the L3Page variant are
            // written; Pte/other variants are silently skipped — confirm
            // that get_next_ptp leaves entries in the expected variant.
            if let PteT::L3Page(ref mut table_flags) = **next_pte {
                // Valid bit, page bit, then the physical frame number.
                table_flags.update_pte_flags_left(0, 1, 0b1);
                table_flags.update_pte_flags_left(1, 1, 0b1);
                table_flags.update_pte_flags_left(13, 36, pa >> PAGE_SHIFT);
                set_pte_flags(&mut **next_pte, flags, KERNEL_PTE);
            }
        }
        pa += PAGE_SIZE;
    }
    // Make the new mappings visible.
    flush_tlb();
    0
}
/*
 * unmap_range_in_pgtble: unmap the virtual address [va:va+len]
 *
 * pgtbl @ ptr for the first level page table(pgd) virtual address
 * va @ start virtual address
 * len @ unmapping size
 *
 * Hint: invoke get_next_ptp to get each level page table, don't
 * forget the corner case that the virtual address is not mapped.
 * call flush_tlb() at the end of function
 *
 */
// Function to unmap the virtual address [va:va+len]
/// Clear the 4 KiB mappings for `[va, va+len)` in `pgtbl`, then flush the
/// TLB. Addresses in the range that are not mapped are skipped silently.
///
/// Always returns 0.
pub fn unmap_range_in_pgtbl(pgtbl: *mut vaddr_t, va: vaddr_t, len: size_t) -> i32 {
    for v in (va..va + len).step_by(PAGE_SIZE as usize) {
        let mut cur_ptp = pgtbl as *mut PtpT;
        let mut next_ptp: *mut PtpT = core::ptr::null_mut();
        let mut next_pte: *mut PteT = core::ptr::null_mut();
        let mut level = 0;
        let mut err = 0;
        // Walk L0..L2 (no allocation) to find the L3 table for `v`.
        // BUG FIX: the walk previously passed `va` (range start) instead of
        // the per-iteration address `v`.
        while level <= 2 {
            err = get_next_ptp(
                cur_ptp,
                level,
                v,
                next_ptp.borrow_mut(),
                next_pte.borrow_mut(),
                false,
            );
            if err != NORMAL_PTP {
                // Not mapped at this level — nothing to unmap for this page.
                break;
            }
            cur_ptp = next_ptp;
            level += 1;
        }

        if err == NORMAL_PTP && level == 3 && !cur_ptp.is_null() {
            // BUG FIX: index the L3 table with `v`, not `va`.
            let index: usize = get_l3_index!(v);
            unsafe {
                next_pte = (*cur_ptp).ent[index].borrow_mut();
                // BUG FIX: `if let PteT::Pte(mut pte)` bound a *copy* of the
                // u64 and zeroed the copy, leaving the table entry intact;
                // bind by mutable reference so the entry is actually cleared.
                if let PteT::Pte(ref mut pte) = *next_pte {
                    *pte = 0;
                }
            }
        }
    }
    flush_tlb();
    // </lab2>
    0
}

// TODO: Add other necessary functions and implementations...

/// Flush the TLB after page-table changes. Stub: currently a no-op (no
/// `tlbi` sequence is issued) — TODO wire up for the real target.
pub fn flush_tlb() {}

/// Write `p` (root page-table physical address) into TTBR0_EL1.
/// Stub: currently a no-op — TODO emit the actual system-register write.
pub fn set_ttbr0_el1(p: paddr_t) {}

/// Allocate physical pages (presumably 2^`order` of them — the only call
/// site uses order 0 for a single page) and return a pointer to the first.
///
/// Stub: no allocator is wired up, so this always returns null; callers
/// guard the result with `BUG_ON!`. TODO: hook up a real allocator (the
/// original sketch used `posix_memalign(&ptr, 0x1000, 0x1000)`).
pub fn get_pages(order: u64) -> *mut PteT {
    // `order` is kept for interface compatibility but unused by the stub.
    let _ = order;
    // The previous body carried a dead `if err != 0` branch around a
    // constant `err = 0`; it could never trigger and has been removed.
    core::ptr::null_mut()
}

/*
/// Map a range in the page table
pub fn map_range_in_pgtbl(
    pgtbl: *const vaddr_t,
    va: vaddr_t,
    pa: paddr_t,
    len: usize,
    flags: VmrProp,
) -> i32 {
    todo!()
}

/// Unmap a range in the page table
#[no_mangle]
pub extern "C" fn unmap_range_in_pgtbl(pgtbl: *const vaddr_t, va: vaddr_t, len: usize) -> i32 {
    todo!()
}
*/
