/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2019-2022. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Description:
 * Author: huawei
 * Create: 2019-10-15
 */

#ifndef QUEUE_UT
#include <linux/uaccess.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/printk.h>
#include <asm/atomic.h>
#include <linux/hashtable.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
#include <linux/version.h>

#include <securec.h>
#include <linux/fs.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>

#include "svm_interface.h"
#include "queue_module.h"
#include "queue_fops.h"
#include "queue_channel.h"
#include "kernel_version_adapt.h"
#include "queue_dma.h"

#define QUEUE_WAKEUP_TIMEINTERVAL 5000 /* 5s */

#define QUEUE_GET_2M_PAGE_NUM   512

#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0)
#define QUEUE_PAGE_WRITE        1
#endif

#define QUEUE_DMA_RETRY_CNT     1000
#define QUEUE_DMA_WAIT_MIN_TIME 100
#define QUEUE_DMA_WAIT_MAX_TIME 200
#define QUEUE_DMA_MAX_NODE_CNT  32768

/*
 * Allocate @size bytes with the extra gfp bits in @flags.
 * kmalloc (atomic, warning suppressed) is tried first; if it cannot
 * satisfy the request (large or fragmented), fall back to vmalloc.
 * Free the result with queue_kvfree(). Returns NULL on failure.
 */
void *queue_kvalloc(u64 size, gfp_t flags)
{
    void *buf;

    buf = kmalloc(size, GFP_ATOMIC | __GFP_NOWARN | __GFP_ACCOUNT | flags);
    if (buf != NULL) {
        return buf;
    }

    /* kmalloc needs contiguous pages; vmalloc only needs contiguous VA. */
    return ka_vmalloc(size, GFP_KERNEL | __GFP_ACCOUNT | flags, PAGE_KERNEL);
}

/*
 * Free memory obtained from queue_kvalloc(), dispatching to the
 * allocator that actually produced it (kmalloc vs vmalloc).
 */
void queue_kvfree(const void *ptr)
{
    if (!is_vmalloc_addr(ptr)) {
        kfree(ptr);
        return;
    }
    vfree(ptr);
}

/*
 * Number of pages spanned by the range [addr, addr + addr_len),
 * accounting for the sub-page offset of @addr (a buffer that straddles
 * a page boundary needs one extra page).
 */
STATIC u64 queue_get_page_num(u64 addr, u64 addr_len)
{
    u64 span = (addr & (PAGE_SIZE - 1)) + addr_len;
    u64 pages = span / PAGE_SIZE;

    return ((span & (PAGE_SIZE - 1)) == 0) ? pages : (pages + 1);
}

STATIC int queue_alloc_dma_blks(struct queue_dma_list *dma_list)
{
    u64 page_num = queue_get_page_num(dma_list->va, dma_list->len);

    dma_list->page = (struct page **)queue_kvalloc(page_num * sizeof(struct page *), 0);
    if (dma_list->page == NULL) {
        queue_err("kmalloc %llu failed.\n", page_num);
        return -ENOMEM;
    }
    dma_list->page_num = page_num;

    dma_list->blks = (struct queue_dma_block *)queue_kvalloc(page_num * sizeof(struct queue_dma_block), 0);
    if (dma_list->blks == NULL) {
        queue_err("kmalloc %llu failed.\n", page_num);
        dma_list->page_num = 0;
        queue_kvfree(dma_list->page);
        dma_list->page = NULL;
        return -ENOMEM;
    }
    dma_list->blks_num = page_num;

    return 0;
}

/*
 * Release both arrays allocated by queue_alloc_dma_blks() and clear the
 * pointers so a double free turns into a harmless free(NULL).
 */
STATIC void queue_free_dma_blks(struct queue_dma_list *dma_list)
{
    queue_kvfree(dma_list->blks);
    dma_list->blks = NULL;
    queue_kvfree(dma_list->page);
    dma_list->page = NULL;
}

/*
 * Yield the CPU when more than QUEUE_WAKEUP_TIMEINTERVAL ms have passed
 * since *pre_stamp (a jiffies value). Called from long loops over user
 * pages/blocks to avoid soft-lockup reports. *pre_stamp is refreshed
 * after a reschedule so the interval restarts.
 */
void queue_try_cond_resched(unsigned long *pre_stamp)
{
    unsigned long elapsed_ms = jiffies_to_msecs(jiffies - *pre_stamp);

    if (elapsed_ms <= QUEUE_WAKEUP_TIMEINTERVAL) {
        return;
    }
    cond_resched();
    *pre_stamp = jiffies;
}

/*
 * Drop the references held on the first @unpin_num entries of @pages,
 * NULL-ing each slot as it is released. Silently does nothing when
 * unpin_num is 0 or inconsistent with @page_num.
 */
STATIC void queue_put_user_pages(struct page **pages, u64 page_num, u64 unpin_num)
{
    unsigned long stamp;
    u64 idx;

    if ((unpin_num == 0) || (unpin_num > page_num)) {
        return;
    }

    stamp = jiffies;
    for (idx = 0; idx < unpin_num; idx++) {
        if (pages[idx] != NULL) {
            put_page(pages[idx]);
            pages[idx] = NULL;
        }
        /* Long unpin loops must not hog the CPU. */
        queue_try_cond_resched(&stamp);
    }
}

/*
 * Pin @page_num writable user pages starting at user VA @va into @pages,
 * in batches of at most QUEUE_GET_2M_PAGE_NUM pages (512 * 4K = 2M).
 * On a short or failed batch every page pinned so far is released.
 * Returns 0 on success, -EFBIG on failure.
 */
STATIC int queue_get_user_pages_fast(u64 va, u64 page_num, struct page **pages)
{
    u64 got_num, remained_num, tmp_va;
    unsigned long stamp = jiffies;
    int expected_num, tmp_num;

    for (got_num = 0; got_num < page_num;) {
        /* Next batch starts right after the pages already pinned. */
        tmp_va = va + got_num * PAGE_SIZE;
        remained_num = page_num - got_num;
        expected_num = (int)((remained_num > QUEUE_GET_2M_PAGE_NUM) ? QUEUE_GET_2M_PAGE_NUM : remained_num);
        /* Pre-5.2 kernels take an int "write" flag; later ones take FOLL_* gup_flags. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0)
        tmp_num = get_user_pages_fast(tmp_va, expected_num, QUEUE_PAGE_WRITE, &pages[got_num]);
#else
        tmp_num = get_user_pages_fast(tmp_va, expected_num, FOLL_WRITE, &pages[got_num]);
#endif
        /* A negative return is an error code, i.e. zero pages pinned in this batch. */
        got_num += (u64)((tmp_num > 0) ? (u32)tmp_num : 0);
        if (tmp_num != expected_num) {
            queue_err("Get_user_pages_fast fail. (bufPtr=0x%llx; already_got_num=%llu; get_va=0x%llx; "
                "expected_num=%d; get_num_or_ret=%d)\n",
                va, got_num, tmp_va, expected_num, tmp_num);
            goto err_exit;
        }
        queue_try_cond_resched(&stamp);
    }

    return 0;

err_exit:
    /* Unpin everything obtained before the failing batch. */
    queue_put_user_pages(pages, page_num, got_num);
    return -EFBIG;
}

/*
 * Pin the user pages backing dma_list->va into dma_list->page.
 * VM_PFNMAP VMAs (remapped via remap_pfn_range, e.g. svm/devmm memory)
 * cannot be resolved by get_user_pages_fast(), so those are looked up
 * through devmm_get_pages_list() instead.
 * Returns 0 on success, a negative errno on failure.
 */
STATIC int queue_get_user_pages(struct queue_dma_list *dma_list)
{
    struct vm_area_struct *vma = NULL;
    bool pfn_mapped = false;

    down_read(get_mmap_sem(current->mm));
    vma = find_vma(current->mm, dma_list->va);
    if (vma != NULL) {
        pfn_mapped = ((vma->vm_flags & VM_PFNMAP) != 0);
    }
    up_read(get_mmap_sem(current->mm));

    if (vma == NULL) {
        queue_err("Get vma failed. (va=0x%llx; len=0x%llx; page_num=%llu)\n",
            dma_list->va, dma_list->len, dma_list->page_num);
        return -EFBIG;
    }

    /* memory remap by remap_pfn_rang, get user page fast can not get page addr */
    if (pfn_mapped) {
        return devmm_get_pages_list(current->mm, dma_list->va, dma_list->page_num, dma_list->page);
    }
    return queue_get_user_pages_fast(dma_list->va, dma_list->page_num, dma_list->page);
}

/*
 * Pin the user pages and seed one PAGE_SIZE block per page, temporarily
 * storing the page's physical address in blk->dma; the real bus address
 * is written later by queue_map_dma_blks().
 * Returns 0 on success, -EFBIG when pinning fails.
 */
STATIC int queue_fill_dma_blks(struct queue_dma_list *dma_list)
{
    u64 idx;
    int ret;

    ret = queue_get_user_pages(dma_list);
    if (ret != 0) {
        queue_err("Get_user_pages failed. (va=0x%llx; len=0x%llx; page_num=%llu; ret=%d)\n",
            dma_list->va, dma_list->len, dma_list->page_num, ret);
        return -EFBIG;
    }

    for (idx = 0; idx < dma_list->page_num; idx++) {
        dma_list->blks[idx].dma = page_to_phys(dma_list->page[idx]); /* tmp store pa */
        dma_list->blks[idx].sz = PAGE_SIZE;
    }

    return 0;
}

/*
 * Drop the pin taken on every user page recorded in dma_list->page.
 * Only called after queue_fill_dma_blks() succeeded, so all page_num
 * entries are valid.
 */
STATIC void queue_clear_dma_blks(struct queue_dma_list *dma_list)
{
    unsigned long stamp = jiffies;
    u64 idx = 0;

    while (idx < dma_list->page_num) {
        put_page(dma_list->page[idx]);
        idx++;
        queue_try_cond_resched(&stamp);
    }
}

/*
 * Compact step for block @idx: merge it into the in-place coalesced
 * prefix of @blks. On entry blks[] holds one PAGE_SIZE entry per pinned
 * page (dma = physical address); entries [0, *merg_idx) form the merged
 * output so far, and *merg_idx is advanced only when a new output block
 * is started. Called once per index, in ascending order, by
 * queue_make_dma_list().
 *
 * Note on the in-place read of blks[i - 1]: the write cursor j never
 * exceeds i, and slot i - 1 can only ever have been written with its own
 * original values (identity copy when j caught up) — a size-extending
 * merge always targets a slot strictly below i - 1 — so the adjacency
 * test below compares the original per-page addresses.
 */
STATIC void queue_merg_dma_blks(struct queue_dma_block *blks, u64 idx, u64 *merg_idx)
{
    struct queue_dma_block *merg_blks = blks;
    u64 j = *merg_idx;
    u64 i = idx;

    /* Previous page ends exactly where this one starts: extend last output block. */
    if ((i >= 1) && (blks[i - 1].dma + blks[i - 1].sz == blks[i].dma)) {
        merg_blks[j - 1].sz += blks[i].sz;
    } else {
        /* Not physically contiguous (or the first page): open a new output block. */
        merg_blks[j].sz = blks[i].sz;
        merg_blks[j].dma = blks[i].dma;
        j++;
    }
    *merg_idx = j;
}

STATIC int queue_map_dma_blks(struct device *dev, struct queue_dma_list *dma_list)
{
    unsigned long stamp = jiffies;
    struct page *page = NULL;
    u64 i, j;

    for (i = 0; i < dma_list->blks_num; i++) {
        page = pfn_to_page(PFN_DOWN(dma_list->blks[i].dma));
        dma_list->blks[i].dma = devdrv_dma_map_page(dev, page, 0, dma_list->blks[i].sz, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, dma_list->blks[i].dma) != 0) {
            queue_err("Dma mapping error. (dma_idx=%llu; ret=%d)\n", i, dma_mapping_error(dev, dma_list->blks[i].dma));
            goto map_dma_blks_err;
        }
        queue_try_cond_resched(&stamp);
    }

    return 0;
map_dma_blks_err:
    stamp = jiffies;
    for (j = 0; j < i; j++) {
        devdrv_dma_unmap_page(dev, dma_list->blks[j].dma, dma_list->blks[j].sz, DMA_BIDIRECTIONAL);
        queue_try_cond_resched(&stamp);
    }

    return -EIO;
}

/*
 * Undo queue_map_dma_blks(): DMA-unmap every merged block in the list.
 */
STATIC void queue_unmap_dma_blks(struct device *dev, struct queue_dma_list *dma_list)
{
    unsigned long stamp = jiffies;
    u64 idx = 0;

    while (idx < dma_list->blks_num) {
        devdrv_dma_unmap_page(dev, dma_list->blks[idx].dma, dma_list->blks[idx].sz, DMA_BIDIRECTIONAL);
        idx++;
        queue_try_cond_resched(&stamp);
    }
}

/*
 * Build a DMA scatter list for the user buffer described by @dma_list:
 * allocate the bookkeeping arrays, pin the pages, coalesce physically
 * contiguous pages into larger blocks, then DMA-map each block.
 * Returns 0 on success; DRV_ERROR_OUT_OF_MEMORY when allocation fails;
 * DRV_ERROR_MEMORY_OPT_FAIL when pinning or mapping fails (with full
 * rollback). Tear down with queue_clear_dma_list().
 */
int queue_make_dma_list(struct device *dev, struct queue_dma_list *dma_list)
{
    u64 idx, merged_cnt;
    int ret;

    /* No user buffer, or DMA disabled for this queue: empty list. */
    if ((dma_list->va == 0) || (dma_list->dma_flag == false)) {
        dma_list->blks_num = 0;
        return 0;
    }

    ret = queue_alloc_dma_blks(dma_list);
    if (ret != 0) {
        queue_err("alloc dma blks failed, ret=%d.\n", ret);
        return DRV_ERROR_OUT_OF_MEMORY;
    }

    ret = queue_fill_dma_blks(dma_list);
    if (ret != 0) {
        queue_err("fill dma blks failed, ret=%d.\n", ret);
        goto free_dma_list;
    }

    /* Coalesce adjacent physical pages to shorten the DMA block list. */
    merged_cnt = 0;
    for (idx = 0; idx < dma_list->page_num; idx++) {
        queue_merg_dma_blks(dma_list->blks, idx, &merged_cnt);
    }
    dma_list->blks_num = merged_cnt;

    ret = queue_map_dma_blks(dev, dma_list);
    if (ret != 0) {
        queue_err("map dma blks failed, ret=%d.\n", ret);
        goto clear_dma_blks;
    }

    return 0;
clear_dma_blks:
    queue_clear_dma_blks(dma_list);
free_dma_list:
    queue_free_dma_blks(dma_list);
    return DRV_ERROR_MEMORY_OPT_FAIL;
}

/*
 * Tear down a list built by queue_make_dma_list(): unmap every block,
 * unpin every page, free the arrays. A list with blks_num == 0 (never
 * built, or built empty) is left untouched.
 */
void queue_clear_dma_list(struct device *dev, struct queue_dma_list *dma_list)
{
    if (dma_list->blks_num != 0) {
        queue_unmap_dma_blks(dev, dma_list);
        queue_clear_dma_blks(dma_list);
        queue_free_dma_blks(dma_list);
    }
}

/*
 * Submit @dma_node_num DMA descriptor nodes to device @dev_id as a
 * synchronous linked copy, in batches of at most QUEUE_DMA_MAX_NODE_CNT
 * nodes. A full DMA queue (-ENOSPC) is retried up to QUEUE_DMA_RETRY_CNT
 * times per batch with a short sleep; the retry budget resets after each
 * successful batch. Returns 0 on success or the driver's error code.
 */
int queue_dma_sync_link_copy(u32 dev_id, struct devdrv_dma_node *dma_node, u64 dma_node_num)
{
    struct devdrv_dma_node *cur_node = dma_node;
    u64 done_num = 0;
    u64 left_num;
    u32 batch_num;
    int retry_cnt = 0;
    int ret = 0;

    while (done_num < dma_node_num) {
        left_num = dma_node_num - done_num;
        batch_num = (u32)min(left_num, (u64)QUEUE_DMA_MAX_NODE_CNT);
        ret = devdrv_dma_sync_link_copy(dev_id, DEVDRV_DMA_DATA_TRAFFIC, DEVDRV_DMA_WAIT_INTR,
            cur_node, batch_num);
        /* dma queue is full, delay resubmit */
        if ((ret == -ENOSPC) && (retry_cnt < QUEUE_DMA_RETRY_CNT)) {
            usleep_range(QUEUE_DMA_WAIT_MIN_TIME, QUEUE_DMA_WAIT_MAX_TIME);
            retry_cnt++;
            continue;
        }

        if (ret != 0) {
            queue_err("Devdrv_dma_sync_link_copy fail. (dev_id=%u; node_cnt=%llu; ret=%d)\n",
                dev_id, dma_node_num, ret);
            return ret;
        }
        done_num += batch_num;
        cur_node += batch_num;
        retry_cnt = 0;
    }

    return ret;
}
#else
/* Stub compiled for the unit-test build (QUEUE_UT defined); always succeeds. */
int queue_dma_ut(void)
{
    return 0;
}
#endif
