/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2019-2022. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Description:
 * Author: huawei
 * Create: 2019-10-15
 */

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/memory.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/version.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/pagemap.h>
#include <linux/atomic.h>
#include <linux/nsproxy.h>
#include <linux/mm.h>

#include "devmm_adapt.h"

#include "svm_cmd.h"
#include "devmm_proc_info.h"
#include "devmm_proc_mem_copy.h"
#include "devmm_channel.h"
#include "devdrv_interface.h"
#include "devmm_common.h"
#include "svm_dma.h"
#include "devmm_dev.h"
#include "svm_proc_mng.h"
#include "svm_heap_mng.h"
#include "svm_mem_mng.h"
#include "svm_cgroup_mng.h"
#include "svm_mem_query.h"
#include "svm_interface.h"
#include "svm_page_cnt_stats.h"
#include "svm_proc_gfp.h"

#include "kernel_version_adapt.h"

#ifdef CFG_FEATURE_VFIO
#include "devmm_pm_vpc.h"
#include "devmm_pm_adapt.h"
#endif

#define DEVMM_MAX(a, b) (((a) > (b)) ? (a) : (b))

#ifndef DEVMM_UT
typedef int (*enquire_soc_resource_func)(u32 devid, u32 vfid, struct vmng_soc_resource_enquire *info);

/*
 * Query SoC resource information from the vmng driver.
 * The callee is resolved at runtime through kallsyms so this module has no
 * hard link-time dependency on vmng.
 * Returns -EINVAL when the symbol cannot be resolved (vmng not loaded),
 * otherwise the callee's return code.
 */
int devmm_enquire_soc_resource(u32 devid, u32 vfid, struct vmng_soc_resource_enquire *info)
{
    enquire_soc_resource_func enquire_soc_resource;

    enquire_soc_resource = (enquire_soc_resource_func)(uintptr_t)__kallsyms_lookup_name("vmngd_enquire_soc_resource");
    if (enquire_soc_resource == NULL) {
        devmm_drv_err("Lookup vmngd_enquire_soc_resource failed. (devid=%u; vfid=%u)\n", devid, vfid);
        return -EINVAL;
    }

    return enquire_soc_resource(devid, vfid, info);
}
#endif

/* Return which endpoint (host or device) this build of the driver is. */
enum devmm_endpoint_type devmm_get_end_type(void)
{
    return DEVMM_END_TYPE; /* compile-time selected endpoint type */
}

/*
 * Normalize the ids carried in an incoming channel message for the local
 * endpoint: the device side stamps the message's dev_id into the process id,
 * while the host side lifts vfid out of the process id and clears the
 * VM-related fields.
 */
STATIC void devmm_chan_update_msg_process_id(struct devmm_chan_msg_head *msg_head)
{
    if (devmm_get_end_type() != DEVMM_END_DEVICE) {
        /* host side: keep vfid at message level, scrub it from the process id */
        msg_head->vfid = msg_head->process_id.vfid;
        msg_head->process_id.vm_id = 0;
        msg_head->process_id.vfid = 0;
    } else {
        /* device side: the message's device id is authoritative */
        msg_head->process_id.devid = msg_head->dev_id;
    }
}

/*
 * Return DEVMM_TRUE when va lies outside the SVM address range of
 * svm_process, or when svm_process itself is NULL.
 */
bool devmm_va_is_not_svm_process_addr(const struct devmm_svm_process *svm_process, unsigned long va)
{
    if (svm_process == NULL) {
        return DEVMM_TRUE;
    }
    if (va < svm_process->start_addr) {
        return DEVMM_TRUE;
    }

    return (va > svm_process->end_addr);
}

#ifndef DRV_UT
/*
 * Walk the page tables of vma->vm_mm down to the pmd entry covering va.
 * Handles both the 4-level and, on kernels >= 4.11, the 5-level page-table
 * layout. Returns the pmd pointer, or NULL when the vma has no mm or any
 * intermediate level is missing/bad.
 */
pmd_t *devmm_get_va_to_pmd(const struct vm_area_struct *vma, unsigned long va)
{
    pgd_t *pgd = NULL;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
    p4d_t *p4d = NULL;
#endif
    pud_t *pud = NULL;
    pmd_t *pmd = NULL;

    if ((vma == NULL) || (vma->vm_mm == NULL)) {
        devmm_drv_err("Vm_mm none. (va=0x%lx)\n", va);
        return NULL;
    }
    /* too much log, not print */
    pgd = pgd_offset(vma->vm_mm, va);
    if (PXD_JUDGE(pgd)) {
        return NULL;
    }

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
    p4d = p4d_offset(pgd, va);
    if (PXD_JUDGE(p4d) != 0) {
        return NULL;
    }

    /* if kernel version is above 4.11.0,then 5 level pt arrived.
    pud_offset(pgd,va) changed to pud_offset(p4d,va) for x86
    but not changed in arm64 */
    pud = pud_offset(p4d, va);
    if (PXD_JUDGE(pud) != 0) {
        return NULL;
    }
#else
    pud = pud_offset(pgd, va);
    if (PXD_JUDGE(pud) != 0) {
        return NULL;
    }
#endif

    pmd = pmd_offset(pud, va);
    return pmd;
}

/*
 * Fetch the pmd covering va and validate it.
 * With huge_flag set the pmd must map a huge page (a huge mapping cannot be
 * validated with the regular bad-pmd check); otherwise it must pass the
 * normal PMD_JUDGE check. *tem_pmd always receives the looked-up pmd.
 * Returns 0 on success, -EDOM when validation fails.
 */
int devmm_va_to_pmd(const struct vm_area_struct *vma, unsigned long va, int huge_flag, pmd_t **tem_pmd)
{
    pmd_t *entry = devmm_get_va_to_pmd(vma, va);

    *tem_pmd = entry;
    if (huge_flag != 0) {
        /* huge page pmd can not judge bad flag */
        return (PMD_HUGE(entry) == 0) ? -EDOM : 0;
    }

    return (PMD_JUDGE(entry) != 0) ? -EDOM : 0;
}

/*
 * Translate a user virtual address into its physical address by walking the
 * page tables of vma->vm_mm. On success *pa holds the physical address
 * including the sub-page offset of va.
 * Returns 0 on success, -EDOM when the pmd is missing/bad, -ERANGE when the
 * pte is none or not present.
 */
int devmm_va_to_pa(const struct vm_area_struct *vma, u64 va, u64 *pa)
{
    u64 aligned_va = round_down(va, PAGE_SIZE);
    pmd_t *pmd = NULL;
    pte_t *pte = NULL;
    int ret;

    ret = devmm_va_to_pmd(vma, aligned_va, 0, &pmd);
    if (ret != 0) {
        /* too much log, not print */
        return ret;
    }
    pte = __pte_map(pmd, aligned_va); /* kernel-version-neutral pte_offset_map wrapper */
    if ((pte_none(*pte) != 0) || (pte_present(*pte) == 0)) {
        return -ERANGE;
    }

    *pa = PFN_PHYS(pte_pfn(*pte));
    *pa += (va - aligned_va); /* restore in-page offset */

    return 0;
}

/*
 * Collect the physical frame addresses backing [start, end) within a single
 * pmd. start/end are page aligned and must lie inside one pmd. On success
 * *num is the number of entries written to pas.
 * Returns 0 on success, -ERANGE when a pte is absent or not present.
 */
STATIC int devmm_va_to_pa_pmd_range(pmd_t *pmd, u64 start, u64 end, u64 *pas, u64 *num)
{
    pte_t *pte = NULL;
    u64 got_num = 0;

    pte = __pte_map(pmd, start); /* kernel-version-neutral pte_offset_map wrapper */
    for (; start != end; pte++, start += PAGE_SIZE) {
        if ((pte_none(*pte) != 0) || (pte_present(*pte) == 0)) {
            return -ERANGE;
        }

        pas[got_num] = PFN_PHYS(pte_pfn(*pte));
        got_num++;
    }

    *num = got_num;
    return 0;
}

/*
 * Collect physical frames for [start, end) within a single pud by walking
 * every covered pmd. pas advances per pmd; *num returns the total number of
 * entries written. Returns -EDOM on a missing/bad pmd, or the error of the
 * pmd-level walk.
 */
STATIC int devmm_va_to_pa_pud_range(pud_t *pud, u64 start, u64 end, u64 *pas, u64 *num)
{
    pmd_t *pmd = NULL;
    u64 got_num = 0;
    u64 next;

    pmd = pmd_offset(pud, start);
    for (; start != end; pmd++, start = next) {
        int ret;
        u64 n;

        if (PXD_JUDGE(pmd) != 0) {
            return -EDOM;
        }

        next = pmd_addr_end(start, end); /* end of this pmd's coverage, capped at end */
        ret = devmm_va_to_pa_pmd_range(pmd, start, next, &pas[got_num], &n);
        if (ret != 0) {
            return ret;
        }
        got_num += n;
    }

    *num = got_num;
    return 0;
}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
/*
 * Collect physical frames for [start, end) within a single p4d by walking
 * every covered pud (5-level page tables, kernels >= 4.11). *num returns the
 * total entries written to pas.
 */
STATIC int devmm_va_to_pa_p4d_range(p4d_t *p4d, u64 start, u64 end, u64 *pas, u64 *num)
{
    pud_t *pud = NULL;
    u64 got_num = 0;
    u64 next;

    pud = pud_offset(p4d, start);
    for (; start != end; pud++, start = next) {
        int ret;
        u64 n;

        if (PXD_JUDGE(pud) != 0) {
            return -EDOM;
        }

        next = pud_addr_end(start, end); /* end of this pud's coverage, capped at end */
        ret = devmm_va_to_pa_pud_range(pud, start, next, &pas[got_num], &n);
        if (ret != 0) {
            return ret;
        }
        got_num += n;
    }

    *num = got_num;
    return 0;
}

/*
 * Collect physical frames for [start, end) within a single pgd by walking
 * every covered p4d (5-level layout). *num returns the total entries written
 * to pas.
 */
STATIC int devmm_va_to_pa_pgd_range(pgd_t *pgd, u64 start, u64 end, u64 *pas, u64 *num)
{
    p4d_t *p4d = NULL;
    u64 got_num = 0;
    u64 next;

    p4d = p4d_offset(pgd, start);
    for (; start != end; p4d++, start = next) {
        int ret;
        u64 n;

        if (PXD_JUDGE(p4d) != 0) {
            return -EDOM;
        }

        next = p4d_addr_end(start, end); /* end of this p4d's coverage, capped at end */
        ret = devmm_va_to_pa_p4d_range(p4d, start, next, &pas[got_num], &n);
        if (ret != 0) {
            return ret;
        }
        got_num += n;
    }

    *num = got_num;
    return 0;
}
#else
/*
 * Pre-4.11 variant: collect physical frames for [start, end) within a single
 * pgd by walking every covered pud (4-level layout, no p4d). *num returns
 * the total entries written to pas.
 */
STATIC int devmm_va_to_pa_pgd_range(pgd_t *pgd, u64 start, u64 end, u64 *pas, u64 *num)
{
    pud_t *pud = NULL;
    u64 got_num = 0;
    u64 next;

    pud = pud_offset(pgd, start);
    for (; start != end; pud++, start = next) {
        int ret;
        u64 n;

        if (PXD_JUDGE(pud) != 0) {
            return -EDOM;
        }

        next = pud_addr_end(start, end); /* end of this pud's coverage, capped at end */
        ret = devmm_va_to_pa_pud_range(pud, start, next, &pas[got_num], &n);
        if (ret != 0) {
            return ret;
        }
        got_num += n;
    }

    *num = got_num;
    return 0;
}
#endif

/*
 * Translate a contiguous range of num pages starting at va into physical
 * addresses, one entry per page written to pas. The caller must guarantee
 * pas holds at least num entries.
 * Returns 0 on success, -EINVAL on bad arguments or address overflow,
 * -EDOM/-ERANGE when an intermediate table level or a pte is missing.
 */
int devmm_va_to_pa_range(const struct vm_area_struct *vma, u64 va, u64 num, u64 *pas)
{
    u64 start, end, len, next;
    pgd_t *pgd = NULL;
    u64 got_num = 0;

    /* match the NULL checks done by devmm_get_va_to_pmd for the single-page path */
    if ((vma == NULL) || (vma->vm_mm == NULL)) {
        devmm_drv_err("Vm_mm none. (va=0x%llx; num=%llu)\n", va, num);
        return -EINVAL;
    }

    start = round_down(va, PAGE_SIZE);
    if (num > (~(u64)0 >> PAGE_SHIFT)) {
        return -EINVAL; /* num << PAGE_SHIFT would overflow */
    }
    len = num << PAGE_SHIFT;
    end = start + len;
    if (end < start) {
        return -EINVAL; /* address wrap-around */
    }

    pgd = pgd_offset(vma->vm_mm, start);
    for (; start != end; pgd++, start = next) {
        int ret;
        u64 n;

        if (PXD_JUDGE(pgd) != 0) {
            return -EDOM;
        }

        next = pgd_addr_end(start, end);
        ret = devmm_va_to_pa_pgd_range(pgd, start, next, &pas[got_num], &n);
        if (ret != 0) {
            return ret;
        }
        got_num += n;
    }

    return 0;
}
#endif

#ifndef HOST_AGENT
/*
 * Pin the pages backing num SVM pages starting at va.
 * The pages array is reused as temporary storage for the physical addresses
 * before they are converted in place to page pointers with a reference
 * taken on each (assumes sizeof(struct page *) == sizeof(u64), i.e. a
 * 64-bit kernel — TODO confirm for any 32-bit target).
 */
int devmm_get_svm_pages(struct vm_area_struct *vma, u64 va, u64 num, struct page **pages)
{
    u64 *pas = (u64 *)pages; /* caller buffer doubles as PA scratch space */
    int ret;
    u64 i;

    ret = devmm_va_to_pa_range(vma, va, num, pas); /* tmp store pa */
    if (ret != 0) {
        devmm_drv_err("Query pa fail. (va=0x%llx; num=%llu)\n", va, num);
        return ret;
    }

    for (i = 0; i < num; i++) {
        pages[i] = devmm_pa_to_page(pas[i]);
        get_page(pages[i]); /* hold a reference for the caller */
    }

    return 0;
}

/*
 * Pin num pages starting at va on behalf of an external caller identified by
 * its mm. Validates that the whole range lies inside a single svm heap of
 * the owning process before pinning. Exported for use by peer drivers.
 * Returns 0 on success, -EINVAL on bad arguments or an out-of-range request.
 */
int devmm_get_pages_list(struct mm_struct *mm, u64 va, u64 num, struct page **pages)
{
    struct devmm_svm_process *svm_proc = NULL;
    struct devmm_svm_heap *heap = NULL;
    u64 size = num << PAGE_SHIFT;
    int ret;

    if ((mm == NULL) || (pages == NULL)) {
        devmm_drv_err("Invalid para.\n");
        return -EINVAL;
    }

    svm_proc = devmm_get_svm_proc_by_mm(mm);
    if (svm_proc == NULL) {
        devmm_drv_err("Get svm_proc failed. (mm=%p)\n", mm);
        return -EINVAL;
    }

    heap = devmm_svm_get_heap(svm_proc, va);
    if (heap == NULL) {
        devmm_drv_err("Get heap failed. (va=0x%llx)\n", va);
        return -EINVAL;
    }

    /* reject ranges that run past the heap's end */
    ret = devmm_check_va_add_size_by_heap(heap, va, size);
    if (ret != 0) {
        devmm_drv_err("Out of range. (va=0x%llx; size=%llu)\n", va, size);
        return -EINVAL;
    }

    return devmm_get_svm_pages_with_lock(svm_proc, va, num, pages);
}
EXPORT_SYMBOL(devmm_get_pages_list);
#endif

/**
 * Translate the range [va, va + sz) into a list of page physical addresses.
 * num is in/out: on input the capacity of pa, on output the number of
 * page entries actually found (translation stops early when pa is full).
 * Returns 0 on success, -ENOENT when a page in the range is not mapped.
 */
int devmm_va_to_palist(const struct vm_area_struct *vma, u64 va, u64 sz, u64 *pa, u32 *num)
{
    u64 end = round_up(va + sz, PAGE_SIZE); /* hoisted loop invariant */
    u64 vaddr, paddr;
    u32 pg_num = 0;
    int ret = 0;

    for (vaddr = round_down(va, PAGE_SIZE); vaddr < end; vaddr += PAGE_SIZE) {
        if (devmm_va_to_pa(vma, vaddr, &paddr) != 0) {
            /* too much log, not print */
            ret = -ENOENT;
            break;
        }
        if (pg_num >= *num) {
            /* va size more than array num */
            break;
        }
        pa[pg_num++] = paddr;
    }
    *num = pg_num;
    return ret;
}

/*
 * Unmap the user ptes covering [vaddr, vaddr + size) from vma.
 * zap_vma_ptes() returned an error code before kernel 4.18 and became void
 * afterwards, hence the version split; the pre-4.18 path only logs failure.
 */
void devmm_zap_vma_ptes(struct vm_area_struct *vma, unsigned long vaddr, unsigned long size)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 18, 0)
    int ret;

    ret = zap_vma_ptes(vma, vaddr, size);
    if (ret != 0) {
        devmm_drv_err("Zap_vma_ptes fail. (va=0x%lx; ret=%d; flags=0x%lx; start=0x%lx; end=0x%lx)\n",
                      vaddr, ret, vma->vm_flags, vma->vm_start, vma->vm_end);
    }
#else
    zap_vma_ptes(vma, vaddr, size);
#endif
}

/*
 * Unmap num pages at vaddr from the owner vma and free the local physical
 * pages behind them. Pages whose physical address is remote (owned by a peer
 * device) are only unmapped, never freed here. Pages not currently mapped
 * are skipped silently.
 */
void devmm_unmap_page_from_vma_owner(struct devmm_svm_process *svm_proc,
    struct vm_area_struct *vma, u64 vaddr, u64 num)
{
    struct devmm_phy_addr_attr attr = {0};
    struct page *pg = NULL;
    u64 i, temp_addr, paddr;

    devmm_phy_addr_attr_pack(svm_proc, DEVMM_NORMAL_PAGE_TYPE, 0, false, &attr);

    for (i = 0; i < num; i++) {
        int ret;
        temp_addr = vaddr + (i << PAGE_SHIFT);
        ret = devmm_va_to_pa(vma, temp_addr, &paddr);
        if (ret != 0) {
            continue; /* page not mapped here, nothing to do */
        }
        devmm_zap_vma_ptes(vma, temp_addr, PAGE_SIZE);
        if (devmm_pa_is_remote_addr(paddr)) {
            continue; /* remote memory is owned elsewhere, do not free */
        }

        pg = devmm_pa_to_page(paddr);
        devmm_proc_free_pages(svm_proc, &attr, &pg, 1);
    }

    return;
}

/*
 * Unmap num pages at vaddr from a custom-process vma and drop the page
 * reference held for that mapping. Remote physical addresses are only
 * unmapped; unmapped pages are skipped silently.
 */
void devmm_unmap_page_from_vma_custom(struct devmm_svm_process *svm_proc,
    struct vm_area_struct *vma, u64 vaddr, u64 num)
{
    u64 i, temp_addr, paddr;

    for (i = 0; i < num; i++) {
        int ret;
        temp_addr = vaddr + (i << PAGE_SHIFT);
        ret = devmm_va_to_pa(vma, temp_addr, &paddr);
        if (ret != 0) {
            continue; /* not mapped in this custom process */
        }
        devmm_zap_vma_ptes(vma, temp_addr, PAGE_SIZE);
        if (devmm_pa_is_remote_addr(paddr)) {
            continue; /* remote memory: unmap only, no refcount drop */
        }

        devmm_put_normal_page(devmm_pa_to_page(paddr));
    }

    return;
}

/*
 * Unmap num pages at vaddr from the owner process. All custom proc_locks are
 * held across the operation — presumably to keep custom-process mappings of
 * the same pages stable while the owner side is torn down (TODO confirm).
 */
void devmm_unmap_pages_owner(struct devmm_svm_process *svm_proc, u64 vaddr, u64 num)
{
    struct vm_area_struct *vma = NULL;
    u32 i;

    vma = devmm_find_vma(svm_proc, vaddr);
    if (vma == NULL) {
        devmm_drv_err("Can not find vma. (vaddr=0x%llx; hostpid=%d; devid=%d; vfid=%d)\n",
            vaddr, svm_proc->process_id.hostpid, svm_proc->process_id.devid, svm_proc->process_id.vfid);
        return;
    }

    for (i = 0; i < DEVMM_CUSTOM_PROCESS_NUM; i++) {
        mutex_lock(&svm_proc->custom[i].proc_lock);
    }
    devmm_unmap_page_from_vma_owner(svm_proc, vma, vaddr, num);
    for (i = 0; i < DEVMM_CUSTOM_PROCESS_NUM; i++) {
        mutex_unlock(&svm_proc->custom[i].proc_lock);
    }

    return;
}

/*
 * Unmap num pages at vaddr from every in-use custom process that has a vma
 * covering vaddr, each under its own proc_lock. Slots that are unused or do
 * not map the address are skipped.
 */
static void  devmm_unmap_pages_custom(struct devmm_svm_process *svm_proc, u64 vaddr, u64 num)
{
    struct vm_area_struct *vma = NULL;
    u32 i;

    for (i = 0; i < DEVMM_CUSTOM_PROCESS_NUM; i++) {
        mutex_lock(&svm_proc->custom[i].proc_lock);
        if (svm_proc->custom[i].status != DEVMM_CUSTOM_USED) {
            mutex_unlock(&svm_proc->custom[i].proc_lock);
            continue;
        }
        vma = devmm_find_vma_custom(svm_proc, i, vaddr);
        if (vma == NULL) {
            mutex_unlock(&svm_proc->custom[i].proc_lock);
            continue;
        }
        devmm_unmap_page_from_vma_custom(svm_proc, vma, vaddr, num);
        mutex_unlock(&svm_proc->custom[i].proc_lock);
    }

    return;
}

/*
 * Tear down host-side mappings for (1 << adjust_order) pages at vaddr:
 * first in every custom process, then in the owner process.
 */
void devmm_unmap_pages(struct devmm_svm_process *svm_proc, u64 vaddr, u32 adjust_order)
{
    const u64 page_cnt = 1ull << adjust_order;

    devmm_unmap_pages_custom(svm_proc, vaddr, page_cnt);
    devmm_unmap_pages_owner(svm_proc, vaddr, page_cnt);
}

/* Zap page_num consecutive ptes starting at va, one page at a time. */
static void devmm_zap_ptes_range(struct vm_area_struct *vma, u64 va, u64 page_num)
{
    u64 idx;

    for (idx = 0; idx < page_num; idx++) {
        devmm_zap_vma_ptes(vma, va + (idx << PAGE_SHIFT), PAGE_SIZE);
    }
}

/* Zap page_num ptes at va in the owner process, if any vma covers va. */
static void devmm_zap_owner_ptes_range(struct devmm_svm_process *svm_proc, u64 va, u64 page_num)
{
    struct vm_area_struct *owner_vma = devmm_find_vma(svm_proc, va);

    if (owner_vma != NULL) {
        devmm_zap_ptes_range(owner_vma, va, page_num);
    }
}

/*
 * Map page_num pages into the owner vma starting at va with protection
 * pgprot. On any failure every pte established by this call is zapped
 * before returning. Returns 0 on success, -ENOMEM on remap failure.
 */
int devmm_insert_pages_to_vma_owner(struct vm_area_struct *vma, u64 va,
    u64 page_num, struct page **inpages, u32 pgprot)
{
    u64 i, addr;
    int ret;

    for (i = 0, addr = va; i < page_num; i++, addr += PAGE_SIZE) {
        ret = remap_pfn_range(vma, addr, page_to_pfn(inpages[i]), PAGE_SIZE, devmm_make_pgprot(pgprot));
        if (ret != 0) {
            /* log names the call that actually failed */
            devmm_drv_err("Remap_pfn_range failed. (ret=%d; va=0x%llx; i=%llu; page_num=%llu)\n",
                ret, addr, i, page_num);
            devmm_zap_ptes_range(vma, va, i); /* roll back partial mappings */
            return -ENOMEM;
        }
    }

    return 0;
}

/*
 * Map page_num pages at va into the owner process vma.
 * Returns -EADDRNOTAVAIL when no vma covers va, otherwise the result of the
 * insertion (0 on success).
 */
static int devmm_pages_remap_owner(struct devmm_svm_process *svm_proc, u64 va, u64 page_num,
    struct page **inpages, u32 page_prot)
{
    struct vm_area_struct *vma = NULL;
    int ret;

    vma = devmm_find_vma(svm_proc, va);
    if (vma == NULL) {
        return -EADDRNOTAVAIL;
    }

    ret = devmm_insert_pages_to_vma_owner(vma, va, page_num, inpages, page_prot);
    if (ret != 0) {
        devmm_drv_info("Can not insert_pages_vma cp. (va=0x%llx; ret=%d)\n", va, ret);
        return ret;
    }

    return 0;
}

/*
 * Map page_num pages at va into a custom-process vma. Pages that already
 * have a pfn mapped (follow_pfn succeeds) are skipped, making the operation
 * idempotent per page. On failure the ptes established by this call are
 * zapped. Returns 0 on success, -ENOMEM on remap failure.
 * NOTE(review): follow_pfn is deprecated/removed in recent mainline kernels —
 * presumably covered by the kernel_version adaptation layer; confirm before
 * targeting new kernels.
 */
int devmm_insert_pages_to_vma_custom(struct vm_area_struct *vma, u64 va,
    u64 page_num, struct page **inpages, u32 pgprot)
{
    unsigned long pfn;
    u64 i, vaddr;
    int ret;

    for (i = 0; i < page_num; i++) {
        vaddr = va + (i << PAGE_SHIFT);
        ret = follow_pfn(vma, vaddr, &pfn);
        if (ret == 0) {
            continue; /* already mapped, keep existing pte */
        }
        ret = remap_pfn_range(vma, vaddr, page_to_pfn(inpages[i]), PAGE_SIZE, devmm_make_pgprot(pgprot));
        if (ret != 0) {
            /* log names the call that actually failed */
            devmm_drv_err("Remap_pfn_range failed. (ret=%d; va=0x%llx; i=%llu; page_num=%llu)\n",
                ret, vaddr, i, page_num);
            devmm_zap_ptes_range(vma, va, i); /* roll back partial mappings */
            return -ENOMEM;
        }
    }

    return 0;
}

/*
 * Map page_num pages at va into every in-use custom process whose vma
 * covers va, pinning the pages once per successfully mapped custom process.
 * NOTE(review): on mid-loop failure, mappings/pins already made for earlier
 * custom slots are not rolled back here — the caller only zaps the owner
 * range; confirm custom-side cleanup happens elsewhere.
 */
static int devmm_pages_remap_custom(struct devmm_svm_process *svm_proc, u64 va, u64 page_num, struct page **inpages,
    u32 page_prot)
{
    struct vm_area_struct *vma = NULL;
    int ret;
    u32 i;

    for (i = 0; i < DEVMM_CUSTOM_PROCESS_NUM; i++) {
        mutex_lock(&svm_proc->custom[i].proc_lock);
        if (svm_proc->custom[i].status != DEVMM_CUSTOM_USED) {
            mutex_unlock(&svm_proc->custom[i].proc_lock);
            continue;
        }
        vma = devmm_find_vma_custom(svm_proc, i, va);
        if (vma == NULL) {
            mutex_unlock(&svm_proc->custom[i].proc_lock);
            continue;
        }
        ret = devmm_insert_pages_to_vma_custom(vma, va, page_num, inpages, page_prot);
        if (ret != 0) {
            mutex_unlock(&svm_proc->custom[i].proc_lock);
            return ret;
        }
        devmm_pin_user_pages(inpages, page_num); /* hold refs for this custom mapping */
        mutex_unlock(&svm_proc->custom[i].proc_lock);
    }

    return 0;
}

/*
 * Map page_num pages at va into the owner process and then into all custom
 * processes. If the custom step fails, the owner ptes are zapped again so
 * the owner side does not keep a partial mapping.
 */
int devmm_pages_remap(struct devmm_svm_process *svm_proc, u64 va, u64 page_num, struct page **inpages, u32 page_prot)
{
    int ret;

    ret = devmm_pages_remap_owner(svm_proc, va, page_num, inpages, page_prot);
    if (ret != 0) {
        devmm_drv_info("Can not devmm_insert_pages_to_vma. (page_num=%llu; ret=%d)\n",
            page_num, ret);
        return ret;
    }
    ret = devmm_pages_remap_custom(svm_proc, va, page_num, inpages, page_prot);
    if (ret != 0) {
        devmm_drv_err("Devmm_insert_pages_custom fail. (page_num=%llu; ret=%d)\n",
            page_num, ret);
        devmm_zap_owner_ptes_range(svm_proc, va, page_num); /* undo owner mapping */
        return ret;
    }

    return 0;
}

/* Zap normal-page mappings at va from all custom processes and the owner. */
void devmm_zap_normal_pages(struct devmm_svm_process *svm_proc, u64 va, u64 page_num)
{
    devmm_unmap_pages_custom(svm_proc, va, page_num);               // TO-DO: optimize name later
    devmm_zap_owner_ptes_range(svm_proc, va, page_num);
}

/* Re-establish host mappings for pg_num pages at va, dispatching on page type. */
int devmm_remap_pages(struct devmm_svm_process *svm_proc, u64 va,
    struct page **pages, u64 pg_num, u32 pg_type)
{
    if (pg_type != DEVMM_NORMAL_PAGE_TYPE) {
        return devmm_remap_huge_pages(svm_proc, va, pages, pg_num, 0);
    }

    return devmm_pages_remap(svm_proc, va, pg_num, pages, 0);   // TO-DO: optimize name later
}

/* Tear down host mappings for pg_num pages at va, dispatching on page type. */
void devmm_zap_pages(struct devmm_svm_process *svm_proc, u64 va, u64 pg_num, u32 pg_type)
{
    if (pg_type != DEVMM_NORMAL_PAGE_TYPE) {
        devmm_zap_huge_pages(svm_proc, va, pg_num);
        return;
    }

    devmm_zap_normal_pages(svm_proc, va, pg_num);
}

/*
 * Allocate normal pages for the mapping described by page_map_info and map
 * them into the owner and custom processes. On remap failure the freshly
 * allocated pages are released again. Returns 0 on success.
 */
int devmm_insert_normal_pages(struct page_map_info *page_map_info, struct devmm_svm_process *svm_proc)
{
    struct devmm_phy_addr_attr attr = {0};
    u32 mem_type = page_map_info->mem_type;
    bool is_continuous = page_map_info->is_continuty;
    int ret;

    devmm_phy_addr_attr_pack(svm_proc, DEVMM_NORMAL_PAGE_TYPE, mem_type, is_continuous, &attr);
    ret = devmm_proc_alloc_pages(svm_proc, &attr, page_map_info->inpages, page_map_info->page_num);
    if (ret != 0) {
        devmm_drv_info("Alloc pages failed. (ret=%d; page_num=%llu)\n", ret, page_map_info->page_num);
        return ret;
    }

    ret = devmm_pages_remap(svm_proc, page_map_info->va, page_map_info->page_num, page_map_info->inpages,
        page_map_info->page_prot);
    if (ret != 0) {
        devmm_drv_info("Devmm_insert_pages_to_vma. (page_num=%llu; page_prot=%d; ret=%d)\n",
            page_map_info->page_num, page_map_info->page_prot, ret);
        /* roll back the allocation on remap failure */
        devmm_proc_free_pages(svm_proc, &attr, page_map_info->inpages, page_map_info->page_num);
        return ret;
    }

    return ret;
}

/*
 * Look up the heap slot covering va. The caller must have verified that va
 * lies inside the process's SVM range. Returns NULL when the slot does not
 * hold a real heap entity.
 */
STATIC struct devmm_svm_heap *devmm_svm_get_heap_proc(struct devmm_svm_process *svm_proc, unsigned long va)
{
    u32 slot = (u32)((va - svm_proc->start_addr) / DEVMM_HEAP_SIZE);
    struct devmm_svm_heap *heap = svm_proc->heaps[slot];

    return (devmm_check_heap_is_entity(heap) == false) ? NULL : heap;
}

struct devmm_svm_heap *devmm_svm_get_heap(struct devmm_svm_process *svm_proc, unsigned long va)
{
    if (devmm_va_is_not_svm_process_addr(svm_proc, va)) {
        return NULL;
    }

    return devmm_svm_get_heap_proc(svm_proc, va);
}

/*
 * Resolve both the svm process for process_id and the heap covering va.
 * Returns -ESRCH when the process has exited, -EADDRNOTAVAIL when va is not
 * backed by a heap, -EINVAL when the heap is neither chunk-page nor
 * huge-page typed.
 */
int devmm_svm_get_svm_proc_and_heap(struct devmm_svm_process_id *process_id, u64 va,
    struct devmm_svm_process **svm_proc, struct devmm_svm_heap **heap)
{
    *svm_proc = devmm_get_svm_proc(process_id);
    if (*svm_proc == NULL) {
        devmm_drv_err("Process is exit. (va=0x%llx; hostpid=%d; devid=%d; vfid=%d)\n",
                      va, process_id->hostpid, process_id->devid, process_id->vfid);
        return -ESRCH;
    }

    *heap = devmm_svm_get_heap(*svm_proc, va);
    if (*heap == NULL) {
        devmm_drv_err("Vaddress is invalid. (va=0x%llx; hostpid=%d; devid=%d; vfid=%d)\n",
                      va, process_id->hostpid, process_id->devid, process_id->vfid);
        return -EADDRNOTAVAIL;
    }
    if (((*heap)->heap_type != DEVMM_HEAP_CHUNK_PAGE) && ((*heap)->heap_type != DEVMM_HEAP_HUGE_PAGE)) {
        devmm_drv_err("Heap type error. (va=0x%llx; hostpid=%d; devid=%d; vfid=%d; type=0x%x)\n",
                      va, process_id->hostpid, process_id->devid, process_id->vfid, (*heap)->heap_type);
        return -EINVAL;
    }

    return 0;
}

/*
 * Derive the page-size/shift relations between host and device once both
 * sides have reported their page shifts. page_size_inited is set only when
 * all three conversion orders (host page -> device huge page, device page ->
 * host page, device huge page -> host huge page) are consistent; any
 * inconsistent shift combination logs an error and returns early, leaving
 * initialization incomplete.
 */
void devmm_chan_set_host_device_page_size(void)
{
    devmm_svm->host_page_size = devmm_svm_pageshift2pagesize(devmm_svm->host_page_shift);
    devmm_svm->host_hpage_size = devmm_svm_pageshift2pagesize(devmm_svm->host_hpage_shift);

    devmm_svm->device_page_size = devmm_svm_pageshift2pagesize(devmm_svm->device_page_shift);
    devmm_svm->device_hpage_size = devmm_svm_pageshift2pagesize(devmm_svm->device_hpage_shift);

    /* the shared svm page granularity is the larger of the two native sizes */
    devmm_svm->svm_page_size = DEVMM_MAX(devmm_svm->host_page_size, devmm_svm->device_page_size);
    devmm_svm->svm_page_shift = DEVMM_MAX(devmm_svm->host_page_shift, devmm_svm->device_page_shift);

    if (devmm_svm->device_hpage_shift < devmm_svm->host_page_shift) {
        devmm_drv_err("Device_huge_page_shift less than host_page_shift. (device_hpage_shift=%u; "
                      "host_page_shift=%u)\n", devmm_svm->device_hpage_shift, devmm_svm->host_page_shift);
        return;
    } else {
        /* device huge page size is 2M ,host do not has huge page,just 4k/16k/64k */
        devmm_svm->host_page2device_hpage_order = devmm_svm->device_hpage_shift - devmm_svm->host_page_shift;
    }

    if (devmm_svm->host_page_shift < devmm_svm->device_page_shift) {
        devmm_drv_err("Host_page_shift less than device_page_shift. (host_page_shift=%u; device_page_shift=%u)\n",
            devmm_svm->host_page_shift, devmm_svm->device_page_shift);
        return;
    } else {
        /* device page size is 4k ,host will be 4k/16k/64k */
        devmm_svm->host_page2device_page_order = devmm_svm->host_page_shift - devmm_svm->device_page_shift;
    }

    if (devmm_svm->host_hpage_shift < devmm_svm->device_hpage_shift) {
        devmm_drv_err("Host_hpage_shift less than device_hpage_shift. (host_hpage_shift=%u; "
                      "device_hpage_shift=%u)\n", devmm_svm->host_hpage_shift, devmm_svm->device_hpage_shift);
        return;
    } else {
        devmm_svm->host_hpage2device_hpage_order = devmm_svm->host_hpage_shift - devmm_svm->device_hpage_shift;
    }

    devmm_svm->page_size_inited = 1;

    devmm_drv_info("Shift info. (host_page_shift=%u; host_hpage_shift=%u; device_page_shift=%u; "
                   "device_hpage_shift=%u; h2dh_adjustorder=%u; h2d_adjustorder=%u) \n",
                   devmm_svm->host_page_shift, devmm_svm->host_hpage_shift, devmm_svm->device_page_shift,
                   devmm_svm->device_hpage_shift, devmm_svm->host_page2device_hpage_order,
                   devmm_svm->host_page2device_page_order);

    devmm_drv_info("Size info. (host_page_size=%u; host_hpage_size=%u; "
                   "device_page_size=%u; device_hpage_size=%u; svm_page_size=%u)\n",
                   devmm_svm->host_page_size, devmm_svm->host_hpage_size, devmm_svm->device_page_size,
                   devmm_svm->device_hpage_size, devmm_svm->svm_page_size);
}

/*
 * Validate the target process of a channel message and mark the message as
 * in-flight (msg_processing++). On the host side the bitmap read lock is
 * taken as well. For messages flagged DEVMM_MSG_RETURN_OK_MASK a missing or
 * aborting process yields -EOWNERDEAD (treated as success by the
 * dispatcher); otherwise -ESRCH (process gone) or -EBUSY (process aborting).
 */
STATIC int devmm_svm_get_channel_lock(struct devmm_svm_process *svm_proc,
    const struct devmm_svm_process_id *process_id, const struct devmm_chan_handlers_st *msg_process, u32 msg_id)
{
    u32 msg_bitmap = msg_process[msg_id].msg_bitmap;

    if (svm_proc == NULL) {
        devmm_drv_err_if(((msg_bitmap & DEVMM_MSG_RETURN_OK_MASK) == 0),
            "Cp is exited, above message. (hostpid=%d; devid=%d; vfid=%d; msg_id=%u)\n",
            process_id->hostpid, process_id->devid, process_id->vfid, msg_id);
        return ((msg_bitmap & DEVMM_MSG_RETURN_OK_MASK) != 0) ? -EOWNERDEAD : -ESRCH ;
    }
    mutex_lock(&svm_proc->proc_lock);
    if ((svm_proc->proc_status & DEVMM_SVM_PROC_ABORT_STATE) != 0) {
        mutex_unlock(&svm_proc->proc_lock);
        devmm_drv_err_if(((msg_bitmap & DEVMM_MSG_RETURN_OK_MASK) == 0),
            "Cp(aicpu) is exiting, above message. (hostpid=%d; devid=%d; vfid=%d; msg_id=%u).\n",
            svm_proc->process_id.hostpid, svm_proc->process_id.devid, svm_proc->process_id.vfid, msg_id);
        return ((msg_bitmap & DEVMM_MSG_RETURN_OK_MASK) != 0) ? -EOWNERDEAD : -EBUSY;
    }
    svm_proc->msg_processing++; /* counted under proc_lock */
    mutex_unlock(&svm_proc->proc_lock);

    if (devmm_get_end_type() == DEVMM_END_HOST) {
        down_read(&svm_proc->bitmap_sem);
    }

    return 0;
}

/*
 * Counterpart of devmm_svm_get_channel_lock: decrement the in-flight message
 * count and, on the host side, drop the bitmap read lock. A NULL svm_proc
 * (process-less message) is a no-op. msg_process/msg_id are kept for
 * interface symmetry and are currently unused.
 */
STATIC void devmm_svm_put_channel_lock(struct devmm_svm_process *svm_proc,
    const struct devmm_chan_handlers_st *msg_process, u32 msg_id)
{
    if (svm_proc == NULL) {
        return;
    }
    mutex_lock(&svm_proc->proc_lock);
    if (svm_proc->msg_processing > 0) {
        svm_proc->msg_processing--;
    }
    mutex_unlock(&svm_proc->proc_lock);

    if (devmm_get_end_type() == DEVMM_END_HOST) {
        up_read(&svm_proc->bitmap_sem);
    }
}

/*
 * Take one "occupy" reference so another process may safely operate on
 * svm_proc. Fails with -EFAULT when the process is aborting or not fully
 * initialized.
 */
int devmm_svm_other_proc_occupy_num_add(struct devmm_svm_process *svm_proc)
{
    int rc = -EFAULT;

    mutex_lock(&svm_proc->proc_lock);
    if (((svm_proc->proc_status & DEVMM_SVM_PROC_ABORT_STATE) == 0) &&
        (svm_proc->inited == DEVMM_SVM_INITED_FLAG)) {
        svm_proc->other_proc_occupying++;
        rc = 0;
    }
    mutex_unlock(&svm_proc->proc_lock);

    return rc;
}

/* Release one occupy reference; guarded so the count never underflows. */
void devmm_svm_other_proc_occupy_num_sub(struct devmm_svm_process *svm_proc)
{
    mutex_lock(&svm_proc->proc_lock);
    if (svm_proc->other_proc_occupying > 0) {
        svm_proc->other_proc_occupying--;
    }
    mutex_unlock(&svm_proc->proc_lock);
}

/*
 * Take an occupy reference on svm_proc and then the shared ioctl read lock.
 * Returns 0 on success, or the error of the occupy step (lock not taken).
 */
int devmm_svm_other_proc_occupy_get_lock(struct devmm_svm_process *svm_proc)
{
    int err = devmm_svm_other_proc_occupy_num_add(svm_proc);

    if (err != 0) {
        return err;
    }

    down_read(&svm_proc->ioctl_rwsem);
    return 0;
}

/* Drop the shared ioctl lock and the occupy reference taken by *_get_lock. */
void devmm_svm_other_proc_occupy_put_lock(struct devmm_svm_process *svm_proc)
{
    up_read(&svm_proc->ioctl_rwsem);
    devmm_svm_other_proc_occupy_num_sub(svm_proc);
}

/*
 * Look up the svm process targeted by a channel message and take the
 * per-process channel lock. Messages flagged DEVMM_MSG_NOT_NEED_PROC_MASK
 * run without a process context (*svm_proc set to NULL, returns 0).
 */
STATIC int devmm_chan_get_svm_proc_and_lock(struct devmm_chan_msg_head *head_msg,
    const struct devmm_chan_handlers_st *msg_process, struct devmm_svm_process **svm_proc)
{
    u32 msg_id = head_msg->msg_id;

    if ((msg_process[msg_id].msg_bitmap & DEVMM_MSG_NOT_NEED_PROC_MASK) != 0) {
        *svm_proc = NULL;
        return 0;
    }
    *svm_proc = devmm_get_svm_proc(&head_msg->process_id);
    return devmm_svm_get_channel_lock(*svm_proc, &head_msg->process_id, msg_process, msg_id);
}

/*
 * Resolve the heap for an address-carrying channel message. Only messages
 * flagged DEVMM_MSG_GET_HEAP_MASK (and with a process context) need a heap;
 * others get *heap = NULL and return 0. A missing heap yields -EREMCHG for
 * messages flagged DEVMM_MSG_RETURN_OK_MASK (treated as success by the
 * dispatcher), -ENXIO otherwise.
 */
STATIC int devmm_chan_get_heap(struct devmm_chan_addr_head *addr_head, const struct devmm_chan_handlers_st *msg_process,
    struct devmm_svm_process *svm_proc, struct devmm_svm_heap **heap)
{
    u32 msg_id = addr_head->head.msg_id;
    u32 msg_bitmap = msg_process[msg_id].msg_bitmap;

    if ((svm_proc == NULL) || ((msg_bitmap & DEVMM_MSG_GET_HEAP_MASK) == 0)) {
        *heap = NULL;
        return 0;
    }
    *heap = devmm_svm_get_heap(svm_proc, addr_head->va);
    if (*heap == NULL) {
        devmm_drv_err_if(((msg_bitmap & DEVMM_MSG_RETURN_OK_MASK) == 0),
            "Address is free or not alloc, above message. (hostpid=%d; devid=%d; vfid=%d; msg_id=%u)\n",
            svm_proc->process_id.hostpid, svm_proc->process_id.devid, svm_proc->process_id.vfid, msg_id);
        return ((msg_bitmap & DEVMM_MSG_RETURN_OK_MASK) != 0) ? -EREMCHG : -ENXIO;
    }
    return 0;
}

/*
 * Entry point for all channel messages: validates the message id and
 * declared length, normalizes ids, resolves the target process and heap,
 * takes the channel lock, and invokes the registered handler.
 * Always returns 0 — per-message errors travel back in head_msg->result so
 * the transport layer does not mistake them for link failures.
 */
int devmm_chan_msg_dispatch(void *msg, u32 in_data_len, u32 out_data_len, u32 *ack_len,
    const struct devmm_chan_handlers_st *msg_process)
{
    struct devmm_chan_msg_head *head_msg = (struct devmm_chan_msg_head *)msg;
    u32 data_len = DEVMM_MAX(in_data_len, out_data_len);
    u32 head_len = sizeof(struct devmm_chan_msg_head);
    struct devmm_svm_process *svm_proc = NULL;
    struct devmm_svm_heap *heap = NULL;
    u32 msg_id, proc_len;
    int ret;

    head_msg->result = 0;
    *ack_len = 0;
    msg_id = head_msg->msg_id;
    if ((msg_id >= DEVMM_CHAN_MAX_ID) || (msg_process[msg_id].chan_msg_processes == NULL)) {
        devmm_drv_err("Invalid message_id or none process func. (msg_id=%u)\n", msg_id);
        ret = -ENOMSG;
        goto save_msg_ret;
    }
    /* expected length = fixed part + per-extend part declared by the handler table */
    proc_len = msg_process[msg_id].msg_size + head_msg->extend_num * msg_process[msg_id].extend_size;
    if (data_len < proc_len) {
        devmm_drv_err("Invalid process_len. (proc_len=%u; in_len=%u; out_len=%u)\n",
            proc_len, in_data_len, out_data_len);
        ret = -EMSGSIZE;
        goto save_msg_ret;
    }

#ifdef CFG_FEATURE_VFIO
    /* VM scene messages are handled by the passthrough dispatcher */
    if (devmm_pm_is_vm_scene(msg)) {
        ret = devmm_pm_chan_msg_dispatch(msg, in_data_len, out_data_len, ack_len);
        goto save_msg_ret;
    }
#endif

    if (head_msg->process_id.vfid >= DEVMM_MAX_VF_NUM) {
        devmm_drv_err("Message_id has invalid. (msg_id=%u; vfid=%d)\n", msg_id, head_msg->process_id.vfid);
        ret = -EINVAL;
        goto save_msg_ret;
    }
    devmm_chan_update_msg_process_id(head_msg);

    ret = devmm_chan_get_svm_proc_and_lock(head_msg, msg_process, &svm_proc);
    if (ret != 0) {
        /* set DEVMM_MSG_RETURN_OK_MASK when thread exit will return owner died ret, msg return ok */
        ret = (ret == -EOWNERDEAD) ? 0 : ret;
        goto save_msg_ret;
    }
    ret = devmm_chan_get_heap((struct devmm_chan_addr_head *)msg, msg_process, svm_proc, &heap);
    if (ret != 0) {
        /* set DEVMM_MSG_RETURN_OK_MASK when heap destroy will return Remote address changed, msg return ok */
        ret = (ret == -EREMCHG) ? 0 : ret;
        goto put_chan_lock;
    }
    ret = devmm_chan_update_msg_logic_id(svm_proc, head_msg);
    if (ret != 0) {
        goto put_chan_lock;
    }
    ret = msg_process[msg_id].chan_msg_processes(svm_proc, heap, msg, ack_len);
put_chan_lock:
    devmm_svm_put_channel_lock(svm_proc, msg_process, msg_id);
save_msg_ret:
    /*
     * 1. if svm process is wrong, the error code is returned by head_msg->result
     * 2. for the performance, if svm process is right, return nothing
     * 3. devmm_chan_msg_dispatch always return 0, because another error code means pcie msg wrong
     */
    if (ret != 0) {
        head_msg->result = (short)ret;
        *ack_len = (*ack_len > head_len) ? *ack_len : head_len;
    }

    return 0;
}
