#include "include/hvc.h"
#include "include/blk.h"
#include "include/config.h"
#include "include/device.h"
#include "include/unilib.h"
#include <linux/delay.h>

/*
 * Translate a kernel virtual address into the intermediate physical
 * address (IPA) seen by stage-2 translation, so the hypervisor can locate
 * the buffer behind a kernel pointer.
 *
 * Uses the AArch64 "AT S1E1W" instruction: the stage-1 EL1 write-access
 * translation result is latched into PAR_EL1, which is read back and then
 * restored so the caller's PAR_EL1 state is left untouched.
 *
 * NOTE(review): PAR_EL1.F (bit 0) is not checked, so a failed translation
 * returns garbage — confirm all callers pass mapped addresses. The ARM ARM
 * generally requires a context-synchronization event (ISB) between AT and
 * the PAR_EL1 read — verify the MRS/MSR macros or surrounding code provide
 * the needed ordering.
 *
 * @va: kernel virtual address (assumed backed by 4 KiB pages).
 * Returns the IPA of the containing page OR'd with the in-page offset.
 */
u64 kva2ipa(u64 va) {
    u64 par = 0, par_saved = 0;
    MRS(par_saved, PAR_EL1);                /* preserve caller's PAR_EL1 */
    asm volatile("AT S1E1W, %0" ::"r"(va)); /* stage-1 EL1 write translate */
    MRS(par, PAR_EL1);                      /* fetch translation result */
    MSR(PAR_EL1, par_saved);                /* restore saved value */

    /* PAR_MASK extracts the page frame; (0x1000 - 1) keeps the 4 KiB
     * page offset from the original address. */
    return (par & PAR_MASK) | (((uint64_t)va) & (0x1000 - 1));
}

/*
 * Keep-alive watchdog callback: fires when a guest VM has not proven
 * liveness within the 20-second window. Re-issues the keep-alive init
 * hypercall and re-arms the per-VM timer.
 *
 * Two variants: kernels up to 4.9.263 use the legacy timer API, where the
 * vmid is passed as the callback's data argument; newer kernels use the
 * timer_setup() API, and the vmid is recovered from the timer's position
 * in the guset_os_timer array.
 */
#if LINUX_VERSION_CODE <= KERNEL_VERSION(4, 9, 263)
static void keep_alive_timer(unsigned long vmid) {
    WARNING("%s: vm[%d] not alive", __func__, (u32)vmid);

    handle_hvc(HVC_IVC, HVC_IVC_INIT_KEEP_ALIVE, 0);
    /* Re-arm: check this VM again in 20 seconds. */
    mod_timer(&guset_os_timer[vmid], jiffies + 20 * HZ);
}
#else
static void keep_alive_timer(struct timer_list *kernel_timer) {
    /* vmid = index of this timer within the per-VM timer array. */
    u32 vmid = kernel_timer - guset_os_timer;
    WARNING("%s: vm[%d] not alive", __func__, vmid);

    handle_hvc(HVC_IVC, HVC_IVC_INIT_KEEP_ALIVE, 0);
    /* Re-arm: check this VM again in 20 seconds. */
    mod_timer(kernel_timer, jiffies + 20 * HZ);
}
#endif

/*
 * Top-level event router: forward (event, arg) to the handler that owns
 * the given hypercall function id.
 *
 * @hvc_fid: HVC_* / IOCTL_SYS function-id selecting the sub-handler.
 * @event:   sub-event, interpreted by the selected handler.
 * @arg:     handler-specific argument (often a user-space pointer).
 *
 * Returns the sub-handler's result, or false for an unknown function id.
 */
bool handle_hvc(uint64_t hvc_fid, uint64_t event, uint64_t arg) {
    if (hvc_fid == HVC_SYS)
        return handle_hvc_sys(event, arg);
    if (hvc_fid == HVC_VMM)
        return handle_hvc_vmm(event, arg);
    if (hvc_fid == HVC_IVC)
        return handle_hvc_ivc(event, arg);
    if (hvc_fid == HVC_MEDIATED)
        return handle_hvc_mediated(event, arg);
    if (hvc_fid == HVC_SECURITY)
        return handle_hvc_security(event, arg);
    if (hvc_fid == IOCTL_SYS)
        return handle_ioctl_sys(event, arg);
    if (hvc_fid == HVC_CONFIG)
        return handle_hvc_config(event, arg);
    if (hvc_fid == HVC_UNILIB)
        return handle_hvc_unilib(event, arg);
    if (hvc_fid == HVC_SHARE)
        return handle_hvc_share(event, arg);
    return false;
}

/*
 * Handle ioctl-originated system commands from the user-space daemon.
 *
 * @event: IOCTL_SYS_* sub-command.
 * @arg:   user-space pointer to a command-specific argument block, or a
 *         plain value (IOCTL_SYS_INIT_USR_PID).
 *
 * Returns true on success, false on any failure (including user-copy
 * errors, which the previous version silently swallowed).
 */
bool handle_ioctl_sys(u64 event, u64 arg) {
    int i;

    switch (event) {
    case IOCTL_SYS_RECEIVE_MSG: {
        struct msg_data *msg = (struct msg_data *)shyper_dev.cfg_ptr;
        struct {
            u64 fin_addr;  /* user address for the "finished" flag */
            u64 size_addr; /* user address for the message size */
            u64 data_addr; /* user address for the message payload */
        } ioctl_arg;
        INFO("receive message from vm[%d]", (u32)msg->src);
        /* Abort on failure instead of using an uninitialized ioctl_arg
         * (the old code only warned and then dereferenced garbage user
         * addresses below). */
        if (copy_from_user(&ioctl_arg, (void *)arg, sizeof(ioctl_arg))) {
            WARNING("%s: failed to copy from user", __func__);
            return false;
        }
        if (copy_to_user((void *)ioctl_arg.fin_addr, &msg->fin, sizeof(u64))) {
            WARNING("%s: failed to copy to user", __func__);
            return false;
        }
        if (copy_to_user((void *)ioctl_arg.size_addr, &msg->size,
                         sizeof(u64))) {
            WARNING("%s: failed to copy to user", __func__);
            return false;
        }
        /* NOTE(review): msg->size originates from the remote VM and is not
         * bounds-checked against the receive queue size here — confirm the
         * hypervisor guarantees it fits. */
        if (copy_to_user((void *)ioctl_arg.data_addr,
                         (void *)shyper_dev.receive_queue[msg->src],
                         msg->size)) {
            WARNING("%s: failed to copy to user", __func__);
            return false;
        }

        return true;
    }
    case IOCTL_SYS_INIT_USR_PID: {
        /* Remember the daemon's pid and resolve its task_struct. */
        shyper_dev.usr_pid = arg;
        rcu_read_lock();
        /* NOTE(review): pid_task() may return NULL for a stale pid —
         * confirm consumers of current_task tolerate that. */
        shyper_dev.current_task =
            pid_task(find_vpid(shyper_dev.usr_pid), PIDTYPE_PID);
        rcu_read_unlock();
        return true;
    }
    case IOCTL_SYS_GET_SEND_IDX: {
        struct {
            u64 vmid;
            u64 ack_idx_addr;
        } hvc_arg;

        if (copy_from_user(&hvc_arg, (void *)arg, sizeof(hvc_arg))) {
            WARNING("%s: copy from user failed", __func__);
            return false;
        }

        /* NOTE(review): hvc_arg.vmid is user-controlled and indexes
         * send_idx without a range check — confirm the caller is trusted
         * or bound it against VM_NUM_MAX. */
        if (copy_to_user((void *)hvc_arg.ack_idx_addr,
                         &shyper_dev.send_idx[hvc_arg.vmid], sizeof(u64))) {
            WARNING("%s: copy to user failed", __func__);
            return false;
        }
        return true;
    }
    case IOCTL_SYS_GET_VMID: {
        if (copy_to_user((void *)arg, &cur_vm_id, sizeof(int))) {
            WARNING(
                "%s: copy to user failed IOCTL_SYS_GET_VMID cur VM id %lld\n",
                __func__, cur_vm_id);
            return false;
        }
        return true;
    }
    case IOCTL_SYS_SET_KERNEL_IMG_NAME: {
        /* Find a free slot (vm_id == 0 means unused). */
        for (i = 0; i < VM_NUM_MAX; i++) {
            if (vm_kernel_image_info_list[i].vm_id == 0) {
                break;
            }
        }
        if (i == VM_NUM_MAX) {
            WARNING("No space for new VM, current vm num %d\n", vm_num);
            return false;
        }
        if (copy_from_user(&vm_kernel_image_info_list[i], (void *)arg,
                           sizeof(struct vm_kernel_image_info))) {
            WARNING("%s: copy from user failed", __func__);
            return false;
        }
        INFO("VM[%lld] set kernel image name %s vm num %d\n",
             vm_kernel_image_info_list[i].vm_id,
             vm_kernel_image_info_list[i].image_name, vm_num + 1);
        vm_num++;
        return true;
    }
    case IOCTL_SYS_GET_KERNEL_IMG_NAME: {
        struct {
            u64 vm_id;
            char *name_addr; /* user buffer of at least NAME_MAX_LENGTH */
        } name_arg;
        if (copy_from_user(&name_arg, (void *)arg, sizeof(name_arg))) {
            WARNING("%s: copy from user failed", __func__);
            return false;
        }
        for (i = 0; i < VM_NUM_MAX; i++) {
            if (vm_kernel_image_info_list[i].vm_id == name_arg.vm_id) {
                if (copy_to_user((void *)name_arg.name_addr,
                                 vm_kernel_image_info_list[i].image_name,
                                 NAME_MAX_LENGTH)) {
                    WARNING("%s: copy to user failed", __func__);
                    return false;
                }
                INFO("VM[%lld] get kernel image name %s\n",
                     vm_kernel_image_info_list[i].vm_id,
                     vm_kernel_image_info_list[i].image_name);
                return true;
            }
        }
        WARNING("VM[%lld] failed to get image name\n", name_arg.vm_id);
        return false;
    }
    case IOCTL_SYS_APPEND_MED_BLK: {
        return mediated_blk_append(arg);
    }
    default:
        return false;
    }
}

/*
 * Handle system-level hypercall events (reboot / shutdown / update / test).
 *
 * @event: HVC_SYS_* sub-event.
 * @arg:   payload forwarded for HVC_SYS_UPDATE; unused otherwise.
 *
 * Returns true when the event was recognized and issued, false otherwise.
 */
bool handle_hvc_sys(uint64_t event, uint64_t arg) {
    if (event == HVC_SYS_REBOOT) {
        hvc_call(0, 0, 0, 0, 0, 0, 0, HVC_MODE(HVC_SYS, HVC_SYS_REBOOT));
        return true;
    }
    if (event == HVC_SYS_SHUTDOWN) {
        hvc_call(0, 0, 0, 0, 0, 0, 0, HVC_MODE(HVC_SYS, HVC_SYS_SHUTDOWN));
        return true;
    }
    if (event == HVC_SYS_UPDATE) {
        hvc_call(arg, 0, 0, 0, 0, 0, 0, HVC_MODE(HVC_SYS, HVC_SYS_UPDATE));
        return true;
    }
    if (event == HVC_SYS_TEST) {
        hvc_call(0, 0, 0, 0, 0, 0, 0, HVC_MODE(HVC_SYS, HVC_SYS_TEST));
        return true;
    }
    return false;
}

/*
 * Handle VM-management hypercall events (list / boot / shutdown / reboot /
 * migrate / remove and friends).
 *
 * @event: HVC_VMM_* sub-event.
 * @arg:   event-specific value: a vmid for lifecycle events, or a
 *         user-space pointer for LIST_VM / GET_VM_DEF_CFG / GET_VM_ID.
 *
 * Returns true on success, false on failure.
 */
bool handle_hvc_vmm(uint64_t event, uint64_t arg) {
    u64 ret = 0;
    int i;
    switch (event) {
    case HVC_VMM_LIST_VM: {
        struct {
            u64 vm_num;
            struct vm_info info_list[];
        } *hvc_arg = vmalloc(VM_PAGE_SIZE);

        if (hvc_arg == NULL) {
            WARNING("%s: failed to allocate list buffer", __func__);
            return false;
        }
        ret = hvc_call(kva2ipa((u64)hvc_arg), 0, 0, 0, 0, 0, 0,
                       HVC_MODE(HVC_VMM, HVC_VMM_LIST_VM));
        if (ret != 0) {
            WARNING("%s: hvc call vmm_list_vm not finish", __func__);
            vfree(hvc_arg); /* was leaked on this path */
            return false;
        }
        if (copy_to_user((void *)arg, hvc_arg, VM_PAGE_SIZE)) {
            WARNING("%s: failed to copy to user", __func__);
            vfree(hvc_arg);
            return false;
        }
        vfree(hvc_arg);
        return true;
    }
    case HVC_VMM_BOOT_VM: {
        ret =
            hvc_call(arg, 0, 0, 0, 0, 0, 0, HVC_MODE(HVC_VMM, HVC_VMM_BOOT_VM));
        if (ret != 0) {
            WARNING("%s: hvc call vmm_boot_vm not finish", __func__);
            return false;
        }
        return true;
    }
    case HVC_VMM_SHUTDOWN_VM: {
        ret = hvc_call(arg, 0, 0, 0, 0, 0, 0,
                       HVC_MODE(HVC_VMM, HVC_VMM_SHUTDOWN_VM));
        if (ret != 0) {
            WARNING("%s: hvc call vmm_shutdown_vm not finish", __func__);
            return false;
        }
        return true;
    }
    case HVC_VMM_REBOOT_VM: {
        ret = hvc_call(arg, 0, 0, 0, 0, 0, 0,
                       HVC_MODE(HVC_VMM, HVC_VMM_REBOOT_VM));
        if (ret != 0) {
            WARNING("%s: hvc call vmm_reboot_vm not finish", __func__);
            return false;
        }
        return true;
    }
    case HVC_VMM_GET_VM_DEF_CFG: {
        struct {
            u32 vm_id;
            /* Fixed typo: was "struct vm_config_enty *" (an accidental
             * incomplete type that only compiled because it stayed a
             * pointer). */
            struct vm_config_entry *entry;
        } hvc_arg;
        struct vm_config_entry *cfg;

        if (copy_from_user(&hvc_arg, (void *)arg, sizeof(hvc_arg))) {
            WARNING("%s: failed to copy from user", __func__);
            return false;
        }
        cfg = kmalloc(sizeof(struct vm_config_entry), GFP_KERNEL);
        if (cfg == NULL) {
            WARNING("%s: failed to allocate config buffer", __func__);
            return false;
        }

        ret = hvc_call(hvc_arg.vm_id, virt_to_phys(cfg), 0, 0, 0, 0, 0,
                       HVC_MODE(HVC_VMM, HVC_VMM_GET_VM_DEF_CFG));
        if (ret != 0) {
            /* Message used to say "vmm_reboot_vm" — wrong event. */
            WARNING("%s: hvc call vmm_get_vm_def_cfg not finish", __func__);
            kfree(cfg);
            return false;
        }
        if (copy_to_user(hvc_arg.entry, cfg, sizeof(struct vm_config_entry))) {
            WARNING("%s: failed to copy to user", __func__);
            kfree(cfg);
            return false;
        }
        kfree(cfg);
        return true;
    }
    case HVC_VMM_GET_VM_ID: {
        u64 id;
        INFO("hvc vmm get vm id");

        ret = hvc_call(kva2ipa((u64)&id), 0, 0, 0, 0, 0, 0,
                       HVC_MODE(HVC_VMM, HVC_VMM_GET_VM_ID));
        if (ret != 0) {
            WARNING("%s: hvc call vmm_get_vm_id not finish", __func__);
            return false;
        }

        if (copy_to_user((void *)arg, &id, sizeof(u64))) {
            WARNING("%s: failed to copy to user", __func__);
            return false;
        }
        return true;
    }
    case HVC_VMM_TRACE_VMEXIT: {
        ret = hvc_call(arg, 0, 0, 0, 0, 0, 0,
                       HVC_MODE(HVC_VMM, HVC_VMM_TRACE_VMEXIT));
        return true;
    }
    case HVC_VMM_MIGRATE_START: {
        ret = hvc_call(arg, 0, 0, 0, 0, 0, 0,
                       HVC_MODE(HVC_VMM, HVC_VMM_MIGRATE_START));
        return true;
    }
    case HVC_VMM_MIGRATE_READY: {
        INFO("send HVC_VMM_MIGRATE_READY");
        ret = hvc_call(arg, 0, 0, 0, 0, 0, 0,
                       HVC_MODE(HVC_VMM, HVC_VMM_MIGRATE_READY));
        return true;
    }
    case HVC_VMM_MIGRATE_MEMCPY: {
        INFO("send HVC_VMM_MIGRATE_MEMCPY");
        ret = hvc_call(arg, 0, 0, 0, 0, 0, 0,
                       HVC_MODE(HVC_VMM, HVC_VMM_MIGRATE_MEMCPY));
        return true;
    }
    case HVC_VMM_MIGRATE_FINISH: {
        ret = hvc_call(arg, 0, 0, 0, 0, 0, 0,
                       HVC_MODE(HVC_VMM, HVC_VMM_MIGRATE_FINISH));
        return true;
    }
    case HVC_VMM_MIGRATE_INIT_VM: {
        ret = hvc_call(arg, 0, 0, 0, 0, 0, 0,
                       HVC_MODE(HVC_VMM, HVC_VMM_MIGRATE_INIT_VM));
        return true;
    }
    case HVC_VMM_MIGRATE_VM_BOOT: {
        ret = hvc_call(arg, 0, 0, 0, 0, 0, 0,
                       HVC_MODE(HVC_VMM, HVC_VMM_MIGRATE_VM_BOOT));
        return true;
    }
    case HVC_VMM_VM_REMOVE: {
        ret = hvc_call(arg, 0, 0, 0, 0, 0, 0,
                       HVC_MODE(HVC_VMM, HVC_VMM_VM_REMOVE));
        /* Free the kernel-image bookkeeping slot for this vmid. */
        for (i = 0; i < VM_NUM_MAX; i++) {
            if (vm_kernel_image_info_list[i].vm_id == arg) {
                vm_kernel_image_info_list[i].vm_id = 0;
                /* Fixed: was sizeof(NAME_MAX_LEN), which cleared only
                 * sizeof(int) bytes of the name, not the whole buffer. */
                memset(vm_kernel_image_info_list[i].image_name, 0,
                       NAME_MAX_LEN);
                break;
            }
        }
        if (i == VM_NUM_MAX) {
            WARNING("failed to find VM[%lld] kernel image info in list\n", arg);
            return false;
        }
        vm_num--;
        return true;
    }
    default:
        return false;
    }
}

/*
 * Handle inter-VM communication (IVC) hypercall events: message-queue
 * setup, send/ack, keep-alive timers and test helpers.
 *
 * @event: HVC_IVC_* sub-event.
 * @arg:   user-space pointer to an event-specific argument block (most
 *         events); unused for the keep-alive events.
 *
 * Returns true on success, false on failure.
 */
bool handle_hvc_ivc(uint64_t event, uint64_t arg) {
    u64 ret = 0, i;
    INFO("handle_hvc_ivc in kernel module: event is 0x%08x\n", (u32)event);
    switch (event) {
    case HVC_IVC_UPDATE_MQ: {
        /* Sized by the loop bound below; the previous fixed [8] would
         * overflow if VM_NUM_MAX ever exceeded 8. */
        u64 receive_queue[VM_NUM_MAX];
        for (i = 0; i < VM_NUM_MAX; i++) {
            receive_queue[i] = kva2ipa((u64)shyper_dev.receive_queue[i]);
        }

        ret = hvc_call(kva2ipa((u64)receive_queue),
                       kva2ipa((u64)shyper_dev.cfg_queue), 0, 0, 0,
                       0, 0, HVC_MODE(HVC_IVC, HVC_IVC_UPDATE_MQ));
        /* ret is u64, so the old "ret < 0" test was always false; a
         * negative hypervisor error code must be detected via a signed
         * cast. On success ret carries this VM's id. */
        if ((s64)ret < 0) {
            WARNING("%s: hvc call ivc_update_mq not finish!", __func__);
            return false;
        }
        shyper_dev.vmid = ret;
        INFO("[HVC_IVC_UPDATE_MQ] get vmid %lld\n", shyper_dev.vmid);

        return true;
    }
    // VM shared memory ivc, not supported in Rust-shyper.
    // case HVC_IVC_GET_SHARED_MEM_IPA: { ... }
    case HVC_IVC_SEND_MSG: {
        u64 entry;
        struct write_arg write_arg;
        if (copy_from_user(&write_arg, (void *)arg, sizeof(struct write_arg))) {
            WARNING("%s: failed to copy from user", __func__);
            return false;
        }

        /* NOTE(review): write_arg.size is user-controlled and not checked
         * against the send_queue capacity here — confirm the queue is
         * large enough for every permitted size. */
        if (copy_from_user(shyper_dev.send_queue, (void *)(write_arg.addr),
                           write_arg.size)) {
            WARNING("%s: failed to copy from user", __func__);
            return false;
        }

        entry = kva2ipa((u64)shyper_dev.send_queue);

        ret =
            hvc_call(write_arg.vm_tgrt, write_arg.size, entry, write_arg.idx,
                     write_arg.fin, 0, 0, HVC_MODE(HVC_IVC, HVC_IVC_SEND_MSG));

        if (ret != 0) {
            /* Message used to say "ivc_update_mq" — wrong event. */
            WARNING("%s: hvc call ivc_send_msg not finish", __func__);
            return false;
        }

        return true;
    }
    case HVC_IVC_BROADCAST_MSG: {
        /* Broadcast is currently a no-op (implementation disabled). */
        return true;
    }
    case HVC_IVC_INIT_KEEP_ALIVE: { // VM[0]
        hvc_call(0, 0, 0, 0, 0, 0, 0,
                 HVC_MODE(HVC_IVC, HVC_IVC_INIT_KEEP_ALIVE));
        /* NOTE(review): this loop uses VM_NUM while other code indexes the
         * timer array with VM_NUM_MAX — confirm the two agree. */
        for (i = 0; i < VM_NUM; ++i) {
            guset_os_timer[i].expires = jiffies + 20 * HZ;
#if LINUX_VERSION_CODE <= KERNEL_VERSION(4, 9, 263)
            /* Legacy API: third argument is the callback data (vmid). */
            setup_timer(&guset_os_timer[i], keep_alive_timer, i);
#else
            /* timer_setup()'s third argument is flags, not callback data;
             * the old code passed i as flags by mistake. The vmid is
             * recovered from the timer pointer in keep_alive_timer(). */
            timer_setup(&guset_os_timer[i], keep_alive_timer, 0);
#endif
            add_timer(&guset_os_timer[i]);
        }
        return true;
    }
    case HVC_IVC_KEEP_ALIVE: {
        hvc_call(0, 0, 0, 0, 0, 0, 0, HVC_MODE(HVC_IVC, HVC_IVC_KEEP_ALIVE));
        return true;
    }
    case HVC_IVC_TEST_SEND: {
        struct send_arg send_arg;
        if (copy_from_user(&send_arg, (void *)arg, sizeof(struct send_arg))) {
            WARNING("%s: failed to copy from user", __func__);
            return false;
        }
        INFO("[handle_hvc_ivc] kernel module send \n");
        INFO("[handle_hvc_ivc] target is %lld, file size %lld\n", send_arg.vmid,
             send_arg.size);

        hvc_call(send_arg.vmid, send_arg.size, send_arg.page_num, 0, 0, 0, 0,
                 HVC_MODE(HVC_IVC, HVC_IVC_TEST_SEND));
        return true;
    }
    case HVC_IVC_SEND_SHAREMEM_TEST_SPEED: {
        struct send_arg send_arg;
        if (copy_from_user(&send_arg, (void *)arg, sizeof(struct send_arg))) {
            WARNING("%s: failed to copy from user", __func__);
            return false;
        }
        INFO("[handle_hvc_ivc] kernel module testspeed\n");
        INFO("[handle_hvc_ivc] target is %lld, file size %lld\n", send_arg.vmid,
             send_arg.size);

        hvc_call(send_arg.vmid, send_arg.size, send_arg.page_num, 0, 0, 0, 0,
                 HVC_MODE(HVC_IVC, HVC_IVC_SEND_SHAREMEM_TEST_SPEED));
        return true;
    }
    case HVC_IVC_ACK: {
        struct {
            u64 src; /* vmid of the message source being acknowledged */
            u64 idx; /* message index being acknowledged */
            u64 fin; /* non-zero when this ack completes the transfer */
        } hvc_arg;

        if (copy_from_user(&hvc_arg, (void *)arg, sizeof(hvc_arg))) {
            WARNING("%s: copy from user failed", __func__);
            return false;
        }

        /* NOTE(review): hvc_arg.src is user-controlled and indexes
         * receive_idx without a range check — confirm the caller is
         * trusted or bound it against VM_NUM_MAX. */
        INFO("%s: send ack[%d] to VM[%d], fin is %d", __func__,
             (int)hvc_arg.idx, (int)hvc_arg.src, (int)hvc_arg.fin);
        hvc_call(hvc_arg.src, hvc_arg.idx, hvc_arg.fin, 0, 0, 0, 0,
                 HVC_MODE(HVC_IVC, HVC_IVC_ACK));
        if (!hvc_arg.fin) {
            shyper_dev.receive_idx[hvc_arg.src] += 1;
        } else {
            /* -1 marks "no transfer in flight" for this source. */
            shyper_dev.receive_idx[hvc_arg.src] = -1;
        }
        return true;
    }
    case HVC_IVC_GET_TIME: {
        u64 init_time;
        hvc_call(kva2ipa((u64)&init_time), 0, 0, 0, 0, 0, 0,
                 HVC_MODE(HVC_IVC, HVC_IVC_GET_TIME));
        if (copy_to_user((void *)arg, &init_time, sizeof(u64))) {
            WARNING("%s: copy to user failed", __func__);
            return false;
        }
        return true;
    }
    default:
        return false;
    }
}

/*
 * Handle mediated-device hypercall events: block-device notifications and
 * batched input-event injection into a guest.
 *
 * @event: HVC_MEDIATED_* sub-event.
 * @arg:   user-space pointer to an event-specific argument block.
 *
 * Returns true on success, false on failure.
 */
bool handle_hvc_mediated(u64 event, u64 arg) {
    u64 ret = 0;
    u64 size;
    int i;
    struct input_event *event_l;
    void *event_list;

    switch (event) {
    case HVC_MEDIATED_DEV_NOTIFY: {
        return mediated_blk_notify(arg);
    }
    case HVC_MEDIATED_DEV_INPUT_SEND: {
        struct {
            u32 vmid;        /* target guest */
            u32 event_count; /* number of input_event records at event_addr */
            void *event_addr;
        } usr_arg;

        if (copy_from_user(&usr_arg, (void *)arg, sizeof(usr_arg))) {
            WARNING("%s: failed to copy from user", __func__);
            return false;
        }
        /* event_count is unsigned, so the old "<= 0" only ever caught
         * zero; make the check say what it does. */
        if (usr_arg.event_count == 0) {
            WARNING("%s: wrong event_count read from usr_arg", __func__);
            return false;
        }
        size = usr_arg.event_count * sizeof(struct input_event);
        event_list = vmalloc(size);
        /* A huge user-supplied count makes vmalloc fail; the old code
         * would then pass NULL to copy_from_user. */
        if (event_list == NULL) {
            WARNING("%s: failed to allocate event buffer", __func__);
            return false;
        }
        if (copy_from_user(event_list, usr_arg.event_addr, size)) {
            WARNING("%s: failed to copy from user", __func__);
            vfree(event_list);
            return false;
        }
        event_l = (struct input_event *)event_list;
        /* Forward each input event to the hypervisor one at a time. */
        for (i = 0; i < usr_arg.event_count; i++) {
            ret = hvc_call(usr_arg.vmid, event_l[i].type, event_l[i].code,
                           event_l[i].value, 0, 0, 0,
                           HVC_MODE(HVC_MEDIATED, HVC_MEDIATED_DEV_INPUT_SEND));
            if (ret != 0) {
                WARNING("%s: hvc call HVC_MEDIATED_DEV_INPUT_SEND failed",
                        __func__);
                vfree(event_list);
                return false;
            }
        }
        vfree(event_list);
        return true;
    }
    default:
        return false;
    }
}

/*
 * Handle unilib file-system hypercall events: append data to a file or
 * signal that an operation has finished.
 *
 * @event: HVC_UNILIB_FS_* sub-event.
 * @arg:   forwarded to the matching unilib helper.
 *
 * Returns the helper's result, or false for an unknown event.
 */
bool handle_hvc_unilib(u64 event, u64 arg) {
    if (event == HVC_UNILIB_FS_APPEND)
        return unilib_fs_append(arg);
    if (event == HVC_UNILIB_FS_FINISHED)
        return unilib_fs_finished(arg);
    return false;
}

/*
 * Shared worker for the HVC_SHMEM_* events: allocate seven page-sized
 * bounce buffers, hand their IPAs to the hypervisor in a single call,
 * then copy the returned contents back to the user buffer page by page.
 *
 * @arg:         user pointer to { u64 usr_addr; u64 size; }.
 * @share_event: HVC_SHARE_* sub-event to issue (GET_IPA or GET_SIZE).
 *
 * Returns true on success. All kernel buffers are freed on every path
 * (the previous code leaked them when the hvc call failed, never checked
 * vmalloc for NULL, and did not even compile: "data_kva[6)").
 */
static bool shmem_page_transfer(u64 arg, u64 share_event) {
    struct {
        u64 usr_addr; /* destination buffer in user space */
        u64 size;     /* total payload size, split per page by cal_page_size */
    } usr_arg;
    void *data_kva[7] = { NULL, NULL, NULL, NULL, NULL, NULL, NULL };
    bool ok = false;
    u64 ret;
    int i;

    if (copy_from_user(&usr_arg, (void *)arg, sizeof(usr_arg))) {
        WARNING("%s: failed to copy from user", __func__);
        return false;
    }
    for (i = 0; i < 7; i++) {
        data_kva[i] = vmalloc(VM_PAGE_SIZE);
        if (data_kva[i] == NULL) {
            WARNING("%s: failed to allocate page buffer", __func__);
            goto out;
        }
    }
    ret = hvc_call(kva2ipa((u64)data_kva[0]), kva2ipa((u64)data_kva[1]),
                   kva2ipa((u64)data_kva[2]), kva2ipa((u64)data_kva[3]),
                   kva2ipa((u64)data_kva[4]), kva2ipa((u64)data_kva[5]),
                   kva2ipa((u64)data_kva[6]),
                   HVC_MODE(HVC_SHARE, share_event));
    if (ret != 0) {
        WARNING("%s: hvc call 0x%llx not finish", __func__,
                (unsigned long long)share_event);
        goto out;
    }
    for (i = 0; i < 7; i++) {
        if (copy_to_user((void *)usr_arg.usr_addr + i * VM_PAGE_SIZE,
                         data_kva[i], cal_page_size(usr_arg.size, i))) {
            WARNING("%s: failed to copy to user", __func__);
            goto out;
        }
    }
    ok = true;
out:
    for (i = 0; i < 7; i++) {
        /* vfree(NULL) is a no-op, so partial allocation is fine. */
        vfree(data_kva[i]);
    }
    return ok;
}

/*
 * Handle shared-memory hypercall events.
 *
 * @event: HVC_SHMEM_* sub-event.
 * @arg:   user pointer forwarded to the transfer worker.
 *
 * Returns true on success, false on failure or unknown event (the
 * previous version had no default case and fell off the end of a bool
 * function — undefined behavior).
 */
bool handle_hvc_shmem(u64 event, u64 arg) {
    switch (event) {
    case HVC_SHMEM_INIT:
        return shmem_page_transfer(arg, HVC_SHARE_MEM_GET_IPA);
    case HVC_SHMEM_DELETE:
        /* NOTE(review): the delete path issues HVC_SHARE_MEM_GET_SIZE,
         * which looks odd for a delete operation — confirm against the
         * hypervisor API. */
        return shmem_page_transfer(arg, HVC_SHARE_MEM_GET_SIZE);
    default:
        return false;
    }
}

/*
 * Shared worker for HVC_SECURITY_GET_LOG / HVC_SECURITY_GET_INFO: the two
 * events were byte-for-byte duplicates differing only in buffer size and
 * sub-event. Allocates a data buffer of buf_bytes, a one-page level-1
 * IPA table describing pages 5..N of that buffer, and a count word; asks
 * the hypervisor to fill them; then copies everything back to user space.
 *
 * @arg:       user pointer to { u64 usr_data_kva; u64 usr_num_kva; }.
 * @buf_bytes: size of the data buffer (SPACE_MAX_SIZE or LOG_BUFFER_BYTES).
 * @sec_event: HVC_SECURITY_* sub-event to issue.
 *
 * Returns true on success. All buffers are freed on every path (the
 * previous code leaked all three allocations on each early return and
 * never checked vmalloc for NULL).
 */
static bool security_fetch_buffer(u64 arg, u64 buf_bytes, u64 sec_event) {
    struct {
        void *data_kva; /* payload buffer shared with the hypervisor */
        u64 *num_kva;   /* hypervisor-written element/byte count */
    } hvc_arg;
    struct {
        u64 usr_data_kva; /* user destination for the payload */
        u64 usr_num_kva;  /* user destination for the count */
    } user_arg;
    u64 *data_kva_l1;
    u64 idx, l1_entries, ret;
    bool ok = false;

    data_kva_l1 = vmalloc(VM_PAGE_SIZE);
    hvc_arg.num_kva = vmalloc(sizeof(u64));
    hvc_arg.data_kva = vmalloc(buf_bytes);
    if (data_kva_l1 == NULL || hvc_arg.num_kva == NULL ||
        hvc_arg.data_kva == NULL) {
        WARNING("%s: failed to allocate buffers", __func__);
        goto out;
    }
    if (copy_from_user(&user_arg, (void *)arg, 2 * sizeof(u64))) {
        WARNING("%s: failed to copy from user", __func__);
        goto out;
    }

    /* The first five pages are passed directly in x0..x4; the remainder
     * of the buffer is described by the one-page level-1 table of IPAs.
     * Clamp to both the buffer size and the table capacity (the old
     * GET_LOG path unconditionally generated 512 entries, translating
     * addresses possibly beyond the allocation). Unused entries stay 0. */
    memset(data_kva_l1, 0, VM_PAGE_SIZE);
    l1_entries = 0;
    if (buf_bytes > 5 * VM_PAGE_SIZE)
        l1_entries = (buf_bytes - 5 * VM_PAGE_SIZE) / VM_PAGE_SIZE;
    if (l1_entries > VM_PAGE_SIZE / sizeof(u64))
        l1_entries = VM_PAGE_SIZE / sizeof(u64);
    for (idx = 0; idx < l1_entries; ++idx) {
        u64 cur_kva =
            (u64)hvc_arg.data_kva + 5 * VM_PAGE_SIZE + idx * VM_PAGE_SIZE;
        data_kva_l1[idx] = kva2ipa(cur_kva);
    }

    ret = hvc_call(kva2ipa((u64)hvc_arg.data_kva),
                   kva2ipa((u64)hvc_arg.data_kva + VM_PAGE_SIZE),
                   kva2ipa((u64)hvc_arg.data_kva + 2 * VM_PAGE_SIZE),
                   kva2ipa((u64)hvc_arg.data_kva + 3 * VM_PAGE_SIZE),
                   kva2ipa((u64)hvc_arg.data_kva + 4 * VM_PAGE_SIZE),
                   kva2ipa((u64)data_kva_l1), kva2ipa((u64)hvc_arg.num_kva),
                   HVC_MODE(HVC_SECURITY, sec_event));
    if (ret != 0) {
        WARNING("%s: hvc call 0x%llx not finish", __func__,
                (unsigned long long)sec_event);
        goto out;
    }

    if (copy_to_user((void *)user_arg.usr_data_kva, hvc_arg.data_kva,
                     buf_bytes)) {
        WARNING("%s: failed to copy to user", __func__);
        goto out;
    }
    if (copy_to_user((void *)user_arg.usr_num_kva, hvc_arg.num_kva,
                     sizeof(u64))) {
        WARNING("%s: failed to copy to user", __func__);
        goto out;
    }
    ok = true;
out:
    /* vfree(NULL) is a no-op, so partial allocation is fine. */
    vfree(hvc_arg.data_kva);
    vfree(hvc_arg.num_kva);
    vfree(data_kva_l1);
    return ok;
}

/*
 * Handle security-subsystem hypercall events: configuration/log/info
 * retrieval, watchdog kick, and a family of self-tests.
 *
 * @event: HVC_SECURITY_* sub-event.
 * @arg:   event-specific value or user-space pointer.
 *
 * Returns true on success, false on failure.
 */
bool handle_hvc_security(u64 event, u64 arg) {
    u64 ret = 0;
    switch (event) {
    case HVC_SECURITY_GET_CFG: {
        int i;
        struct {
            u64 usr_addr; /* destination buffer in user space */
            u64 size;     /* total size, split per page by cal_page_size */
        } usr_arg;
        void *data_kva[7] = { NULL, NULL, NULL, NULL, NULL, NULL, NULL };
        bool ok = false;

        if (copy_from_user(&usr_arg, (void *)arg, sizeof(usr_arg))) {
            WARNING("%s: failed to copy from user", __func__);
            return false;
        }
        for (i = 0; i < 7; i++) {
            data_kva[i] = vmalloc(VM_PAGE_SIZE);
            if (data_kva[i] == NULL) {
                WARNING("%s: failed to allocate page buffer", __func__);
                goto cfg_out;
            }
        }
        ret = hvc_call(kva2ipa((u64)data_kva[0]), kva2ipa((u64)data_kva[1]),
                       kva2ipa((u64)data_kva[2]), kva2ipa((u64)data_kva[3]),
                       kva2ipa((u64)data_kva[4]), kva2ipa((u64)data_kva[5]),
                       kva2ipa((u64)data_kva[6]),
                       HVC_MODE(HVC_SECURITY, HVC_SECURITY_GET_CFG));
        if (ret != 0) {
            /* The old code leaked all seven buffers on this path. */
            WARNING("%s: hvc call security_get_cfg not finish", __func__);
            goto cfg_out;
        }
        for (i = 0; i < 7; i++) {
            if (copy_to_user((void *)usr_arg.usr_addr + i * VM_PAGE_SIZE,
                             data_kva[i], cal_page_size(usr_arg.size, i))) {
                WARNING("%s: failed to copy to user", __func__);
                goto cfg_out;
            }
        }
        ok = true;
    cfg_out:
        for (i = 0; i < 7; i++)
            vfree(data_kva[i]); /* vfree(NULL) is a no-op */
        return ok;
    }
    case HVC_SECURITY_GET_LOG:
        return security_fetch_buffer(arg, SPACE_MAX_SIZE,
                                     HVC_SECURITY_GET_LOG);
    case HVC_SECURITY_WATCH_DOG: {
        ret = hvc_call(0, 0, 0, 0, 0, 0, 0,
                       HVC_MODE(HVC_SECURITY, HVC_SECURITY_WATCH_DOG));
        if (ret != 0) {
            WARNING("%s: hvc call security_watch_dog not finish", __func__);
            return false;
        }
        return true;
    }
    case HVC_SECURITY_GET_INFO:
        return security_fetch_buffer(arg, LOG_BUFFER_BYTES,
                                     HVC_SECURITY_GET_INFO);
    case HVC_SECURITY_TEST: {
        /* arg selects a named self-test; unknown values are forwarded
         * to the hypervisor verbatim. */
        switch (arg) {
        case ACC_2_N_3: {
            ret = hvc_call(ACC_2_N_3, 101, 0, 0, 0, 0, 0,
                           HVC_MODE(HVC_SECURITY, HVC_SECURITY_TEST));
            if (ret != 0) {
                WARNING("%s: test ACC_2_N_3 fail", __func__);
                return false;
            }
            return true;
        }
        case ACF_1_N_1: {
            ret = hvc_call(ACF_1_N_1, 0, 0, 0, 0, 0, 0,
                           HVC_MODE(HVC_SECURITY, HVC_SECURITY_TEST));
            if (ret != 0) {
                WARNING("%s: test ACF_1_N_1 fail!", __func__);
                return false;
            }
            return true;
        }
        case ACF_1_N_2: {
            ret = hvc_call(ACF_1_N_2, 0, 0, 0, 0, 0, 0,
                           HVC_MODE(HVC_SECURITY, HVC_SECURITY_TEST));
            if (ret != 0) {
                WARNING("%s: test ACF_1_N_2 fail!", __func__);
                return false;
            }
            return true;
        }
        case ARP_1_N_2_4: {
            /* NOTE(review): this case deliberately runs sub-steps _2_2
             * then (after a 9 s pause) _2_3 — confirm this two-phase
             * sequence is the intended meaning of ARP_1_N_2_4. */
            ret = hvc_call(ARP_1_N_2_2, 0, 0, 0, 0, 0, 0,
                           HVC_MODE(HVC_SECURITY, HVC_SECURITY_TEST));
            if (ret != 0) {
                WARNING("%s: test ARP_1_N_2_2 fail!", __func__);
                return false;
            }
            ssleep(9);
            ret = hvc_call(ARP_1_N_2_3, 0, 0, 0, 0, 0, 0,
                           HVC_MODE(HVC_SECURITY, HVC_SECURITY_TEST));
            if (ret != 0) {
                WARNING("%s: test ARP_1_N_2_3 fail!", __func__);
                return false;
            }
            return true;
        }
        default:
            ret = hvc_call(arg, 0, 0, 0, 0, 0, 0,
                           HVC_MODE(HVC_SECURITY, HVC_SECURITY_TEST));
            if (ret != 0) {
                WARNING("%s: test %d fail!", __func__, (u32)arg);
                return false;
            }
            return true;
        }
    }
    case HVC_SECURITY_RANDOM_TEST: {
        /* Forward eight raw user-supplied registers straight into an hvc
         * call (test/fuzz hook). */
        u64 hvc_arg[8];
        if (copy_from_user(hvc_arg, (void *)arg, 8 * sizeof(u64))) {
            WARNING("%s: failed to copy from user", __func__);
            return false;
        }
        hvc_call(hvc_arg[0], hvc_arg[1], hvc_arg[2], hvc_arg[3], hvc_arg[4],
                 hvc_arg[5], hvc_arg[6], hvc_arg[7]);
        return true;
    }
    default:
        return false;
    }
}

/*
 * Handle VM-configuration hypercall events by forwarding @arg to the
 * matching hvc_config_* helper.
 *
 * @event: HVC_CONFIG_* sub-event.
 * @arg:   forwarded to the selected configuration helper.
 *
 * Returns the helper's result, or false for an unknown event.
 */
bool handle_hvc_config(u64 event, u64 arg) {
    switch (event) {
    case HVC_CONFIG_ADD_VM:
        return hvc_config_add_vm(arg);
    case HVC_CONFIG_DELETE_VM:
        return hvc_config_delete_vm(arg);
    case HVC_CONFIG_CPU:
        return hvc_config_vm_cpu(arg);
    case HVC_CONFIG_MEMORY_REGION:
        return hvc_config_vm_memory_region(arg);
    case HVC_CONFIG_EMULATED_DEVICE:
        return hvc_config_vm_emulated_device(arg);
    case HVC_CONFIG_PASSTHROUGH_DEVICE_REGION:
        return hvc_config_vm_passthrough_device_region(arg);
    case HVC_CONFIG_PASSTHROUGH_DEVICE_IRQS:
        return hvc_config_vm_passthrough_device_irqs(arg);
    case HVC_CONFIG_PASSTHROUGH_DEVICE_STREAMS_IDS:
        return hvc_config_vm_passthrough_device_streams_ids(arg);
    case HVC_CONFIG_DTB_DEVICE:
        return hvc_config_vm_dtb_device(arg);
    case HVC_CONFIG_UPLOAD_KERNEL_IMAGE:
        return hvc_config_vm_upload_kernel_image(arg);
    case HVC_CONFIG_MEMORY_COLOR_BUDGET:
        return hvc_config_vm_memory_color_budget(arg);
    case HVC_CONFIG_UPLOAD_DEVICE_TREE:
        return hvc_config_vm_upload_device_tree(arg);
    default:
        return false;
    }
}
