/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2019-2022. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Description:
 * Author: huawei
 * Create: 2019-10-15
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/pagemap.h>
#include <linux/hugetlb.h>

#include "kernel_version_adapt.h"
#include "devmm_common.h"
#include "devmm_adapt.h"
#include "svm_log.h"
#include "svm_mem_mng.h"

/*
 * Build the page protection used for remote (device-side) mappings:
 * the base protection derived from flg, made write-combining.
 */
pgprot_t devmm_make_remote_pgprot(u32 flg)
{
    pgprot_t base_prot = devmm_make_pgprot(flg);

    return pgprot_writecombine(base_prot);
}

/*
 * Build the non-cached page protection: the base protection derived
 * from flg, marked as device (uncached) memory.
 */
pgprot_t devmm_make_nocache_pgprot(u32 flg)
{
    pgprot_t base_prot = devmm_make_pgprot(flg);

    return pgprot_device(base_prot);
}

/* Report whether the read-only flag is set in page_prot. */
bool devmm_is_readonly_mem(u32 page_prot)
{
    return (page_prot & DEVMM_PAGE_READONLY_FLG) != 0;
}

/* Convert a physical address to its struct page (address rounded down to a page frame). */
struct page *devmm_pa_to_page(u64 paddr)
{
    unsigned long pfn = PFN_DOWN(paddr);

    return pfn_to_page(pfn);
}

#ifndef EMU_ST
/*
 * Look up the VMA covering (or following) vaddr in mm.
 * Thin wrapper over the kernel's find_vma(); compiled out for EMU_ST builds.
 * NOTE(review): find_vma() expects the caller to hold the mmap lock — verify
 * callers do so.
 */
struct vm_area_struct *devmm_find_vma_from_mm(struct mm_struct *mm, u64 vaddr)
{
    struct vm_area_struct *vma = find_vma(mm, vaddr);

    return vma;
}
#endif

/*
 * Dump the cached SVM VMA array to the error log for diagnostics.
 * Iteration stops at the first NULL slot.
 */
static void devmm_print_svm_process_vma(struct vm_area_struct *vma[], u32 vma_num)
{
    u32 idx;

    for (idx = 0; idx < vma_num; idx++) {
        struct vm_area_struct *cur = vma[idx];

        if (cur == NULL) {
            break;
        }
        devmm_drv_err("Svm vma. (idx=%u; vm_start=0x%lx; vm_end=0x%lx; vm_pgoff=0x%lx; vm_flags=0x%lx)\n",
            idx, cur->vm_start, cur->vm_end, cur->vm_pgoff, cur->vm_flags);
    }
}

/*
 * Resolve the VMA that contains vaddr, preferring the per-process cache.
 *
 * Fast path: index the cache with devmm_get_svm_vma_index() and return the
 * cached entry when it really covers vaddr. On a cache miss, log the current
 * cache contents, fall back to the kernel's find_vma(), and refresh the cache
 * since the OS-side VMAs may have changed.
 *
 * Returns NULL (after logging) when no VMA exists for vaddr.
 * NOTE(review): find_vma() can return a VMA with vm_start > vaddr; callers
 * presumably re-check the range — confirm.
 */
struct vm_area_struct *devmm_find_vma_proc(struct mm_struct *mm, struct vm_area_struct *vma[],
    u32 vma_num, u64 vaddr)
{
    struct vm_area_struct *found = NULL;
    u32 slot = devmm_get_svm_vma_index(vaddr, vma_num);
    struct vm_area_struct *cached = vma[slot];

    if ((cached != NULL) && (vaddr >= cached->vm_start) && (vaddr < cached->vm_end)) {
        return cached;
    }

    devmm_print_svm_process_vma(vma, vma_num);
    found = find_vma(mm, vaddr);
    if (found == NULL) {
        devmm_drv_err("Vma not exist. (vaddr=0x%llx)\n", vaddr);
        return NULL;
    }
    /* os vma may change, set again */
    devmm_set_proc_vma(mm, vma, vma_num);
    return found;
}

/*
 * Pin up to num user pages of mm starting at va (for writing) into pages[],
 * under the mm's mmap read lock.
 *
 * The #if ladder adapts to the changing get_user_pages_remote() signature:
 *   >= 5.9:  the tsk argument was dropped from the kernel API (tsk is unused
 *            here on those kernels).
 *   >= 4.10: tsk + a trailing "locked" output pointer (passed as NULL).
 *   >= 4.9:  tsk, no "locked" pointer.
 *   >= 4.6:  gup_flags split across two arguments (FOLL_WRITE, 0).
 *   older:   falls back to get_user_pages_locked(); "locked" reports whether
 *            the mmap lock is still held on return.
 * NOTE(review): "locked" is only consumed by the oldest branch; on newer
 * kernels it is set but unused.
 *
 * Returns the number of pages pinned, or a negative errno from the kernel.
 */
static long _devmm_get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
    u64 va, u32 num, struct page **pages)
{
    long got_num;
    int locked;

    locked = 1;
    down_read(get_mmap_sem(mm));
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 9, 0)
    got_num = get_user_pages_remote(mm, va, num, FOLL_WRITE, pages, NULL, NULL);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
    got_num = get_user_pages_remote(tsk, mm, va, num, FOLL_WRITE, pages, NULL, NULL);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)
    got_num = get_user_pages_remote(tsk, mm, va, num, FOLL_WRITE, pages, NULL);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0)
    got_num = get_user_pages_remote(tsk, mm, va, num, FOLL_WRITE, 0, pages, NULL);
#else
    got_num = get_user_pages_locked(tsk, mm, va, num, FOLL_WRITE, 0, pages, &locked);
#endif
    up_read(get_mmap_sem(mm));

    return got_num;
}

/*
 * Pin exactly num user pages of mm starting at va into pages[].
 *
 * Returns 0 on full success; -ENOMEM when fewer than num pages could be
 * pinned (any partially pinned pages are released before returning).
 *
 * Fix: _devmm_get_user_pages_remote() returns a negative errno on failure,
 * in which case no page was pinned. The previous code cast that negative
 * value to u64 and passed it to devmm_unpin_user_pages() as a pin count,
 * which could unpin wildly out-of-range entries. Only unpin on a partial
 * (positive) result.
 */
int devmm_get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
    u64 va, u32 num, struct page **pages)
{
    long got_num;

    got_num = _devmm_get_user_pages_remote(tsk, mm, va, num, pages);
    if (got_num == num) {
        return 0;
    }
    if (got_num > 0) {
        /* partial pin: release what was pinned before reporting failure */
        devmm_unpin_user_pages(pages, num, (u64)got_num);
    }
    return -ENOMEM;
}
