/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2019-2022. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Description:
 * Author: huawei
 * Create: 2019-10-15
 */
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/version.h>
#include <linux/delay.h>

#include "kernel_version_adapt.h"
#include "devdrv_interface.h"

#include "devmm_adapt.h"
#include "svm_heap_mng.h"
#include "svm_log.h"
#include "devmm_channel.h"
#include "devmm_common.h"

/* Shared boolean globals (used e.g. to satisfy lint rules that forbid literal loop conditions). */
bool g_devmm_true = true;   /* was initialized with the int literal 1 */
bool g_devmm_false = false; /* was initialized with the int literal 0 */

/* Return true when agent_id identifies the host-side agent. */
bool devmm_is_host_agent(u32 agent_id)
{
    return agent_id == SVM_HOST_AGENT_ID;
}

/*
 * Sleep briefly (1~2us) if more than 'time' milliseconds of jiffies have
 * elapsed since *pre_stamp, then refresh *pre_stamp with the current jiffies.
 */
void devmm_try_usleep_by_time(u32 *pre_stamp, u32 time)
{
    u32 elapsed_ms = jiffies_to_msecs(jiffies - *pre_stamp);

    if (elapsed_ms <= time) {
        return;
    }
    usleep_range(1, 2); /* sleep 1~2us */
    *pre_stamp = (u32)jiffies;
}

/*
 * Offer the scheduler a reschedule point if more than 'time' milliseconds of
 * jiffies have elapsed since *pre_stamp, then refresh the stamp.
 */
void devmm_try_cond_resched_by_time(u32 *pre_stamp, u32 time)
{
    u32 elapsed_ms = jiffies_to_msecs(jiffies - *pre_stamp);

    if (elapsed_ms <= time) {
        return;
    }
    cond_resched();
    *pre_stamp = (u32)jiffies;
}

/* Reschedule-point helper using the default DEVMM_WAKEUP_TIMEINTERVAL threshold (ms). */
void devmm_try_cond_resched(u32 *pre_stamp)
{
    devmm_try_cond_resched_by_time(pre_stamp, DEVMM_WAKEUP_TIMEINTERVAL);
}

/* Report whether the SMMU is currently in the OPENING state. */
bool devmm_smmu_is_opening(void)
{
    /* The function returns bool, so the DEVMM_TRUE/DEVMM_FALSE ternary was redundant. */
    return devmm_svm->smmu_status == DEVMM_SMMU_STATUS_OPENING;
}

/*
 * Pin total_num user pages starting at va into 'pages', in batches of at most
 * DEVMM_GET_2M_PAGE_NUM. On any short or failed batch, unpin everything pinned
 * so far and return -E2BIG; returns 0 on full success.
 *
 * Fix: get_user_pages_fast() returns the number of pages pinned OR a negative
 * errno. The old code cast the result to u32, so a negative errno became a
 * huge positive count, corrupting got_num (which both broke the error log and
 * made devmm_unpin_user_pages() bail out, leaking the already-pinned pages).
 */
int devmm_get_user_pages_fast(u64 va, u64 total_num, int write, struct page **pages)
{
    u64 got_num, expected_num, remained_num, tmp_va;
    long tmp_num;
    u32 stamp;

    stamp = (u32)jiffies;
    for (got_num = 0; got_num < total_num;) {
        tmp_va = va + got_num * PAGE_SIZE;
        remained_num = total_num - got_num;
        expected_num = (remained_num > DEVMM_GET_2M_PAGE_NUM) ? DEVMM_GET_2M_PAGE_NUM : remained_num;
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0)
        tmp_num = get_user_pages_fast(tmp_va, (int)expected_num, write, &pages[got_num]);
#else
        tmp_num = get_user_pages_fast(tmp_va, (int)expected_num, (write != 0) ? FOLL_WRITE : 0, &pages[got_num]);
#endif
        /* Negative return = errno, no pages pinned in this round; never add it to got_num. */
        got_num += (tmp_num > 0) ? (u64)tmp_num : 0;
        if (tmp_num != (long)expected_num) {
            devmm_drv_err("Get_user_pages_fast fail. (va=0x%llx; expected_page_num=%llu; real_got_page_num=%ld)\n",
                tmp_va, expected_num, tmp_num);
            goto page_err;
        }
        devmm_try_cond_resched(&stamp);
    }

    return 0;

page_err:
    devmm_unpin_user_pages(pages, total_num, got_num);

    return -E2BIG;
}

/* Take an extra reference on each of page_num pages; release with devmm_unpin_user_pages(). */
void devmm_pin_user_pages(struct page **pages, u64 page_num)
{
    u64 idx;
    u32 stamp = (u32)jiffies;

    for (idx = 0; idx < page_num; idx++) {
        get_page(pages[idx]);
        devmm_try_cond_resched(&stamp); /* long lists must not hog the CPU */
    }
}

/*
 * Drop references on the first unpin_num entries of 'pages' (array capacity
 * page_num) and NULL them out. Silently ignores a NULL array or a count
 * larger than the array.
 */
void devmm_unpin_user_pages(struct page **pages, u64 page_num, u64 unpin_num)
{
    u64 idx;
    u32 stamp;

    if ((pages == NULL) || (unpin_num > page_num)) {
        return;
    }

    stamp = (u32)jiffies;
    for (idx = 0; idx < unpin_num; idx++) {
        struct page *pg = pages[idx];

        if (pg != NULL) {
            put_page(pg);
            pages[idx] = NULL; /* guard against double unpin */
        }
        devmm_try_cond_resched(&stamp);
    }
}

/*
 * Count how many page_size chunks the byte range [vptr, vptr + sizes) touches,
 * including the partial chunk before vptr. Returns 0 on invalid input
 * (page_size of 0 or a range above DEVMM_MAX_MAPPED_RANGE).
 * NOTE(review): the mask arithmetic assumes page_size is a power of two — confirm with callers.
 */
u64 devmm_get_pagecount_by_size(u64 vptr, u64 sizes, u32 page_size)
{
    u64 span;

    if ((page_size == 0) || (sizes > DEVMM_MAX_MAPPED_RANGE)) {
        devmm_drv_err("Chunk_page_size or byte_count is error. "
            "(chunk_page_size=%u; byte_count=%llu)", page_size, sizes);
        return 0;
    }

    /* Offset within the first chunk plus the payload, then ceil-divide. */
    span = (vptr & (page_size - 1)) + sizes;
    return (span / page_size) + (((span & (page_size - 1)) != 0) ? 1 : 0);
}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
/* Read fsize bytes from fp at *pos into dst_addr; kernel_read() accepts a kernel buffer directly. */
ssize_t devmm_read_file(struct file *fp, char *dst_addr, size_t fsize, loff_t *pos)
{
    return kernel_read(fp, dst_addr, fsize, pos);
}
#else
/*
 * Pre-4.14 fallback: vfs_read() expects a user-space pointer, so the legacy
 * set_fs(KERNEL_DS) pattern temporarily widens the address-space limit around
 * the call and restores it afterwards.
 */
ssize_t devmm_read_file(struct file *fp, char *dst_addr, size_t fsize, loff_t *pos)
{
    mm_segment_t old_fs;
    ssize_t ret;

    old_fs = get_fs();
    /*lint -emacro(501,KERNEL_DS)*/
    set_fs((mm_segment_t)KERNEL_DS); /*lint!e501*/
    ret = vfs_read(fp, dst_addr, fsize, pos);
    set_fs(old_fs);
    return ret;
}
#endif

/*
 * Read one '\n'-terminated line (at most buf_len - 1 bytes) from fp into buf,
 * NUL-terminate it, and rewind fp->f_pos to just past the newline so the next
 * call continues from there. Returns buf, or NULL on read failure / bad args.
 *
 * Fix: with buf_len == 0 the old code computed buf_len - 1 on a u32, which
 * wrapped to 0xFFFFFFFF and requested a huge read; guard small buffers up front.
 */
char *devmm_read_line(struct file *fp, char *buf, u32 buf_len)
{
    u32 i = 0;
    long ret;

    if (buf_len < 2) { /* need room for at least one byte plus the terminator */
        devmm_drv_warn("File filestring not right. (buf_len=%u)\n", buf_len);
        return NULL;
    }

    ret = devmm_read_file(fp, buf, buf_len - 1, &fp->f_pos);
    if (ret <= 0) {
        devmm_drv_warn("File filestring not right. (buf_len=%u)\n", buf_len);
        return NULL;
    }

    /* Advance i one past the first newline, or to ret if the chunk has none. */
    while (i < (u32)ret) {
        if (buf[i++] == '\n') {
            break;
        }
    }

    if (i < ret) {
        fp->f_pos += i - ret; /* rewind the unconsumed tail of this read */
    }

    if (i < buf_len) {
        buf[i] = 0;
    }

    return buf;
}

/*
 * Validate that [va, va + size) fits inside the remaining space of 'heap'.
 * Returns 0 when the range fits, -EINVAL otherwise.
 */
int devmm_check_va_add_size_by_heap(struct devmm_svm_heap *heap, u64 va, u64 size)
{
    u64 total_cnt, used_cnt, free_cnt, need_cnt;

    /* A single request can never exceed the whole heap. */
    if (size > heap->heap_size) {
        devmm_drv_err("Size is bad. (va=0x%llx; size=%llu; heap_size=%llu)\n",
                      va, size, heap->heap_size);
        return -EINVAL;
    }

    used_cnt = (va - heap->start) / heap->chunk_page_size;
    total_cnt = heap->heap_size / heap->chunk_page_size;
    free_cnt = total_cnt - used_cnt;
    need_cnt = devmm_get_pagecount_by_size(va, size, heap->chunk_page_size);
    if ((need_cnt == 0) || (free_cnt < need_cnt)) {
        devmm_drv_err("Va_need_count is zero, or heap_free_count < va_need_count. (heap_free_count=%llu; "
                      "va_need_count=%llu; va=0x%llx; size=%llu; heap_start_va=0x%llx; page_size=%u)\n",
                      free_cnt, need_cnt, va, size, heap->start, heap->chunk_page_size);
        return -EINVAL;
    }

    return 0;
}

/*
 * Validate the common fields of a heap enable/disable request: svm-mem state,
 * host-agent restrictions, size alignment/limits, index range, and (for
 * enable) heap type/sub-type. Returns true when the request is acceptable.
 *
 * Fixes: the function returns bool but one path returned the DEVMM_TRUE macro
 * while its siblings returned true/false — made consistent; also restored the
 * missing space in the `ENABLE) &&` condition.
 */
bool devmm_check_common_input_heap_info(struct devmm_svm_process *svm_pro,
    struct devmm_update_heap_para *cmd, u32 devid)
{
    u32 heap_num;

    /* Enabling an SVM/HOST sub-type heap requires svm mem to be enabled first. */
    if ((cmd->op == DEVMM_HEAP_ENABLE) && (devmm_svm_mem_is_enable(svm_pro) == false) &&
        ((cmd->heap_sub_type == SUB_SVM_TYPE) || (cmd->heap_sub_type == SUB_HOST_TYPE))) {
        devmm_drv_err("Host svm mem is disable.\n");
        return false;
    }

    if ((cmd->op == DEVMM_HEAP_ENABLE) && devmm_is_host_agent(devid) &&
        ((cmd->heap_sub_type != SUB_DEVICE_TYPE) || (cmd->heap_type != DEVMM_HEAP_CHUNK_PAGE))) {
        devmm_drv_err("Host agent only support heap SUB_DEVICE_TYPE DEVMM_HEAP_CHUNK_PAGE."
            " (heap_sub_type=%u; heap_type=%u).\n", cmd->heap_sub_type, cmd->heap_type);
        return false;
    }

    /* heap_size must be a non-zero multiple of DEVMM_HEAP_SIZE within the global limit. */
    if ((cmd->heap_size > DEVMM_MAX_ALLOC_PAGE_NUM * PAGE_SIZE) ||
        (cmd->heap_size == 0) ||
        (cmd->heap_size % DEVMM_HEAP_SIZE != 0)) {
        devmm_drv_err("Input error. (hostpid=%d; devid=%d; vfid=%d; size=0x%llx)\n",
            svm_pro->process_id.hostpid, svm_pro->process_id.devid, svm_pro->process_id.vfid,
            cmd->heap_size);
        return false;
    }
    heap_num = (u32)(cmd->heap_size / DEVMM_HEAP_SIZE);
    /* NOTE(review): '>=' also rejects a range ending exactly at DEVMM_MAX_HEAP_NUM — confirm intended. */
    if ((cmd->heap_idx >= DEVMM_MAX_HEAP_NUM) || ((cmd->heap_idx + heap_num) >= DEVMM_MAX_HEAP_NUM)) {
        return false;
    }

    /* heap type and page size must match */
    if (cmd->op == DEVMM_HEAP_ENABLE) {
        if ((cmd->heap_type < DEVMM_HEAP_PINNED_HOST) || (cmd->heap_type > DEVMM_HEAP_CHUNK_PAGE)) {
            return false;
        }
        if (cmd->heap_sub_type >= SUB_MAX_TYPE) {
            return false;
        }
        return true;
    } else if (cmd->op == DEVMM_HEAP_DISABLE) {
        return true;
    }

    return false;
}

/*
 * Full validation of a heap enable/disable request: common checks plus a scan
 * of the affected heap slots. Enable demands every slot empty; disable
 * demands every slot populated. Returns true when the request is valid.
 */
bool devmm_check_input_heap_info(struct devmm_svm_process *svm_pro,
    struct devmm_update_heap_para *cmd, u32 devid)
{
    bool enabling, is_entity;
    u32 heap_num, i;

    if (devmm_check_common_input_heap_info(svm_pro, cmd, devid) == false) {
        devmm_drv_err("Input error. (hostpid=%d; devid=%d; vfid=%d; size=0x%llx)\n",
            svm_pro->process_id.hostpid, svm_pro->process_id.devid, svm_pro->process_id.vfid, cmd->heap_size);
        return false;
    }

    enabling = (cmd->op == DEVMM_HEAP_ENABLE);
    heap_num = (u32)(cmd->heap_size / DEVMM_HEAP_SIZE);
    for (i = 0; i < heap_num; i++) {
        is_entity = devmm_check_heap_is_entity(svm_pro->heaps[cmd->heap_idx + i]);
        if (enabling && is_entity) {
            devmm_drv_err("Enable input error. (hostpid=%d; devid=%d; vfid=%d; size=0x%llx)\n",
                svm_pro->process_id.hostpid, svm_pro->process_id.devid, svm_pro->process_id.vfid,
                cmd->heap_size);
            return false;
        }
        if (!enabling && !is_entity) {
            devmm_drv_err("Disable input error. (hostpid=%d; devid=%d; vfid=%d; size=0x%llx)\n",
                svm_pro->process_id.hostpid, svm_pro->process_id.devid, svm_pro->process_id.vfid,
                cmd->heap_size);
            return false;
        }
    }

    return true;
}

/*
 * Map a heap type/sub-type pair to its chunk page size.
 * Returns 0 for an unrecognized heap type.
 */
STATIC u32 devmm_get_page_size_by_heap_type(u32 heap_type, u32 heap_sub_type)
{
    /* Reserve heaps always use the device huge-page size, whatever the type. */
    if (heap_sub_type == SUB_RESERVE_TYPE) {
        return devmm_svm->device_hpage_size;
    }

    switch (heap_type) {
    case DEVMM_HEAP_HUGE_PAGE:
        return devmm_svm->device_hpage_size;
    case DEVMM_HEAP_CHUNK_PAGE:
        return (heap_sub_type == SUB_DEVICE_TYPE) ? devmm_svm->device_page_size : devmm_svm->svm_page_size;
    case DEVMM_HEAP_PINNED_HOST:
        return devmm_svm->host_page_size;
    default:
        return 0;
    }
}

/* used with the devmm_free, function as alloc */
/* used with the devmm_free, function as alloc */
/* Try a physically contiguous kmalloc first; fall back to vmalloc on failure. */
void *devmm_kvalloc(u64 size, gfp_t flags)
{
    void *ptr;

    ptr = kmalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_ACCOUNT | flags);
    if (ptr != NULL) {
        return ptr;
    }

    /* Contiguous allocation failed (NOWARN suppressed the splat); use vmalloc. */
    return ka_vmalloc(size, GFP_KERNEL | __GFP_ACCOUNT | flags, PAGE_KERNEL);
}

/* Zero-initialized variant of devmm_kvalloc(); free with devmm_kvfree(). */
void *devmm_kvzalloc(u64 size)
{
    return devmm_kvalloc(size, __GFP_ZERO);
}

/* used with the devmm_zalloc, function as free */
/* used with the devmm_zalloc, function as free */
/* Release memory from devmm_kvalloc/devmm_kvzalloc, picking kfree or vfree by address kind. */
void devmm_kvfree(const void *ptr)
{
    if (!is_vmalloc_addr(ptr)) {
        kfree(ptr);
        return;
    }
    vfree(ptr);
}

/* Look up the VMA covering vaddr within the process's cached vma table. */
struct vm_area_struct *devmm_find_vma(struct devmm_svm_process *svm_proc, u64 vaddr)
{
    return devmm_find_vma_proc(svm_proc->mm, svm_proc->vma, svm_proc->vma_num, vaddr);
}

/* A heap slot is an "entity" when it is allocated and its type is not IDLE. */
bool devmm_check_heap_is_entity(struct devmm_svm_heap *heap)
{
    return (heap != NULL) && (heap->heap_type != DEVMM_HEAP_IDLE);
}

/*
 * Mark the range [va, va + size) as used in heap->used_mask, one bit per
 * HEAP_USED_PER_MASK_SIZE-sized chunk relative to heap->start. The range is
 * widened outward to chunk boundaries before marking.
 */
void devmm_set_heap_used_status(struct devmm_svm_heap *heap, u64 va, u64 size)
{
    u64 end_addr = round_up(va + size, HEAP_USED_PER_MASK_SIZE);
    u64 addr = round_down(va, HEAP_USED_PER_MASK_SIZE);
    u32 nr;

    for (; addr < end_addr; addr += HEAP_USED_PER_MASK_SIZE) {
        nr = (u32)((addr - heap->start) / HEAP_USED_PER_MASK_SIZE);
        /* Bits beyond the mask width are silently clipped. */
        if (nr >= (u32)(sizeof(heap->used_mask) * DEVMM_BITS_PER_CHAR)) {
            break;
        }
        set_bit(nr, (unsigned long*)&heap->used_mask);
    }
}

/*
 * Clear heap_num consecutive heap slots starting at heap_idx and free the
 * shared heap object they all point to (see devmm_set_svm_heap_struct()).
 */
void devmm_free_heap_struct(struct devmm_svm_process *svm_process, u32 heap_idx, u32 heap_num)
{
    struct devmm_svm_heap *heap = svm_process->heaps[heap_idx];
    u32 i;

    for (i = 0; i < heap_num; i++) {
        svm_process->heaps[heap_idx + i] = NULL;
    }
    /* No NULL guard needed: kfree() and vfree() are NULL-safe, and
     * is_vmalloc_addr(NULL) is false, so devmm_kvfree(NULL) is a no-op. */
    devmm_kvfree(heap);
}

/* Initialize a freshly allocated heap from the update command: copy the
 * request fields, then derive the start address and chunk page size. */
STATIC void devmm_set_heap_info(struct devmm_svm_heap *heap, struct devmm_update_heap_para *cmd)
{
    heap->heap_idx = cmd->heap_idx;
    heap->heap_type = cmd->heap_type;
    heap->heap_sub_type = cmd->heap_sub_type;
    heap->heap_size = cmd->heap_size;

    /* Heap slots are laid out contiguously from DEVMM_SVM_MEM_START. */
    heap->start = DEVMM_SVM_MEM_START + cmd->heap_idx * DEVMM_HEAP_SIZE;
    heap->chunk_page_size = devmm_get_page_size_by_heap_type(cmd->heap_type, cmd->heap_sub_type);
}

/* Point heap_num consecutive slots starting at heap_idx at the same heap object. */
STATIC void devmm_set_svm_heap_struct(struct devmm_svm_process *svm_process, struct devmm_svm_heap *heap,
    u32 heap_idx, u32 heap_num)
{
    u32 slot = heap_idx;
    u32 last = heap_idx + heap_num;

    while (slot < last) {
        svm_process->heaps[slot] = heap;
        slot++;
    }
}

/*
 * Enable or disable (per cmd->op) a run of heap slots described by cmd.
 * Enable allocates one devmm_svm_heap shared by heap_num consecutive slots
 * and initializes its vmma manager; disable tears the manager down and clears
 * the slots. Returns 0 on success or a negative errno.
 */
int devmm_update_heap_info(struct devmm_svm_process *svm_process, struct devmm_update_heap_para *cmd)
{
    struct devmm_svm_heap *heap = NULL;
    u32 heap_num;
    int ret;

    heap_num = (u32)(cmd->heap_size / DEVMM_HEAP_SIZE);
    if (cmd->op == DEVMM_HEAP_ENABLE) {
        heap = svm_process->heaps[cmd->heap_idx];
        if (heap != NULL) {
            /* smp device already updated or setup device more than once */
            return 0;
        }
        heap = devmm_kvzalloc(sizeof(struct devmm_svm_heap));
        if (heap == NULL) {
            return -ENOMEM;
        }

        devmm_set_heap_info(heap, cmd);
        /* Only SVM-sub-type heaps count toward the process heap-size budget. */
        svm_process->alloced_heap_size += (cmd->heap_sub_type == SUB_SVM_TYPE) ? cmd->heap_size : 0;
        /* Track the highest slot index ever used (high-water mark). */
        svm_process->max_heap_use = (svm_process->max_heap_use > (cmd->heap_idx + heap_num)) ?
            svm_process->max_heap_use : (cmd->heap_idx + heap_num);
        devmm_set_svm_heap_struct(svm_process, heap, cmd->heap_idx, heap_num);

        ret = devmm_vmma_mng_init(&heap->vmma_mng, heap->start, heap->heap_size);
        if (ret != 0) {
            /* Roll back: clear the slots and free the heap object. */
            devmm_free_heap_struct(svm_process, cmd->heap_idx, heap_num);
            return ret;
        }
    } else {
        heap = svm_process->heaps[cmd->heap_idx];

        if (heap != NULL) {
            devmm_vmma_mng_uninit(&heap->vmma_mng);
        }

        devmm_free_heap_struct(svm_process, cmd->heap_idx, heap_num);
        /* NOTE(review): this subtracts even when the slot was already empty —
         * confirm callers prevent a double-disable from skewing the budget. */
        svm_process->alloced_heap_size -= (cmd->heap_sub_type == SUB_SVM_TYPE) ? cmd->heap_size : 0;
    }

    return 0;
}

/*
 * Write a 'wide'-bit field at bit offset 'shift' inside *flag with 'value'
 * (value is truncated to the field width).
 *
 * Fix: (1U << wide) is undefined behavior when wide >= 32 (C11 6.5.7);
 * treat a full-width request as an all-ones mask instead.
 */
void devmm_dev_fault_flag_set(u32 *flag, u32 shift, u32 wide, u32 value)
{
    u32 msk = (wide < 32) ? ((1U << wide) - 1) : 0xFFFFFFFFU; /* 32 = bits in u32 */
    u32 val = (msk & value);

    (*flag) &= (u32)(~(msk << shift));
    (*flag) |= (u32)(val << shift);
}

/*
 * Read the 'wide'-bit field at bit offset 'shift' from 'flag'.
 *
 * Fix: (1U << wide) is undefined behavior when wide >= 32 (C11 6.5.7);
 * treat a full-width request as an all-ones mask instead.
 */
u32 devmm_dev_fault_flag_get(u32 flag, u32 shift, u32 wide)
{
    u32 msk = (wide < 32) ? ((1U << wide) - 1) : 0xFFFFFFFFU; /* 32 = bits in u32 */
    u32 val = flag >> shift;

    return (val & msk);
}

/* Maximum number of pages one message may carry; ESL/FPGA builds may batch
 * larger contiguous advise ranges. */
u32 devmm_get_max_page_num_of_per_msg(u32 *bitmap)
{
#ifdef CFG_SOC_PLATFORM_ESL_FPGA
    if (devmm_page_bitmap_is_advise_continuty(bitmap) == true) {
        return 1024; // 1024 max page_num per msg
    }
#endif
    return DEVMM_PAGE_NUM_PER_MSG;
}

/* Name used for the devmm character device registration. */
const char *devmm_get_chrdev_name(void)
{
    return DEVMM_CHRDEV_NAME;
}

#ifndef DEVMM_UT
/* True when the device identified by dev_id is attached via PCIe. */
bool devmm_is_pcie_connect(u32 dev_id)
{
    return (devdrv_get_connect_protocol(dev_id) == CONNECT_PROTOCOL_PCIE);
}
#endif

/* Mark SVM memory as enabled for this process. */
void devmm_svm_mem_enable(struct devmm_svm_process *svm_proc)
{
    svm_proc->is_enable_svm_mem = true;
}

/* Mark SVM memory as disabled for this process. */
void devmm_svm_mem_disable(struct devmm_svm_process *svm_proc)
{
    svm_proc->is_enable_svm_mem = false;
}

/* Query whether SVM memory is enabled for this process. */
bool devmm_svm_mem_is_enable(struct devmm_svm_process *svm_proc)
{
    return svm_proc->is_enable_svm_mem;
}

/* Fill a physical-address attribute descriptor from the process identity and
 * the given page/memory classification. */
void devmm_phy_addr_attr_pack(struct devmm_svm_process *svm_proc, u32 pg_type, u32 mem_type, bool is_continuous,
    struct devmm_phy_addr_attr *attr)
{
    /* Owning side/device identity. */
    attr->side = DEVMM_SIDE_TYPE;
    attr->devid = svm_proc->process_id.devid;
    attr->vfid = svm_proc->process_id.vfid;
    attr->module_id = 0;

    /* Classification; continuity is only meaningful for normal pages. */
    attr->pg_type = pg_type;
    attr->mem_type = mem_type;
    attr->is_continuous = (pg_type == DEVMM_NORMAL_PAGE_TYPE) && is_continuous;
}