/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2012-2020. All rights reserved.
 * Description: hdc core common interface.
 * Author: huawei
 * Create: 2020/5/18
 *
 * This program is free software; you can redistribute it and /or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version
 */

#include "hdcdrv_core_com.h"
#include "hdcdrv_mem_com.h"

#include <linux/device.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/delay.h>


/*
 * Return true when the calling context is a kernel thread.
 * Kernel threads run without a user address space, so current->mm is NULL.
 */
bool hdcdrv_is_kernel_thread(void)
{
    return current->mm == NULL;
}

/*
 * Return the caller's thread-group id, or the fixed default pid
 * when called from a kernel thread (which has no user tgid to report).
 */
int hdcdrv_get_pid(void)
{
    return hdcdrv_is_kernel_thread() ? HDCDRV_KERNEL_DEFAULT_PID : current->tgid;
}

/*
 * Return the thread-group id of the caller's parent task, or the fixed
 * default pid when called from a kernel thread.
 */
int hdcdrv_get_ppid(void)
{
    return hdcdrv_is_kernel_thread() ? HDCDRV_KERNEL_DEFAULT_PID : current->parent->tgid;
}

/*
 * devnode callback for the hdc device class: report the device node
 * permission mode. Always returns NULL (no custom node name).
 */
char *hdcdrv_devnode(struct device *dev, umode_t *mode)
{
    if (mode == NULL) {
        return NULL;
    }

    *mode = HDCDRV_DEV_MODE;
    return NULL;
}

/*
 * Register the hdc character device: allocate a dev_no region, add the
 * cdev, create the device class and the device node.
 *
 * Returns HDCDRV_OK on success; on failure returns the kernel error code
 * (alloc/add) or HDCDRV_CHAR_DEV_CREAT_FAIL (class/device create), with
 * all partially acquired resources released via the goto chain.
 */
int hdccom_register_cdev(struct hdcdrv_cdev *hcdev, const struct file_operations *fops)
{
    struct device *dev = NULL;
    int ret;

    ret = alloc_chrdev_region(&hcdev->dev_no, 0, HDCDRV_CDEV_COUNT, HDCDRV_CHAR_DRIVER_NAME);
    if (ret != 0) {
        hdcdrv_err("alloc char dev failed. ret %d\n", ret);
        return ret;
    }

    /* init and add char device */
    cdev_init(&hcdev->cdev, fops);
    hcdev->cdev.owner = THIS_MODULE;

    ret = cdev_add(&hcdev->cdev, hcdev->dev_no, HDCDRV_CDEV_COUNT);
    if (ret != 0) {
        hdcdrv_err("add char dev failed. ret %d\n", ret);
        goto CDEV_ADD_FAILED;
    }

    /* class_create() returns an ERR_PTR on failure, never NULL, so a NULL
     * check would miss every error; use IS_ERR() instead. */
    hcdev->cdev_class = class_create(THIS_MODULE, HDCDRV_CHAR_DRIVER_NAME);
    if (IS_ERR(hcdev->cdev_class)) {
        hdcdrv_err("class create failed.\n");
        hcdev->cdev_class = NULL;
        ret = HDCDRV_CHAR_DEV_CREAT_FAIL;
        goto CLASS_CREATE_FAILED;
    }

    hcdev->cdev_class->devnode = hdcdrv_devnode;
    dev = device_create(hcdev->cdev_class, NULL, hcdev->dev_no, NULL, HDCDRV_CHAR_DRIVER_NAME);
    if (IS_ERR(dev)) {
        hdcdrv_err("device create failed.\n");
        ret = HDCDRV_CHAR_DEV_CREAT_FAIL;
        goto DEV_CREATE_FAILED;
    }

    hcdev->dev = dev;

    return HDCDRV_OK;

DEV_CREATE_FAILED:
    class_destroy(hcdev->cdev_class);
    hcdev->cdev_class = NULL;
CLASS_CREATE_FAILED:
    cdev_del(&hcdev->cdev);
CDEV_ADD_FAILED:
    unregister_chrdev_region(hcdev->dev_no, HDCDRV_CDEV_COUNT);

    return ret;
}

/*
 * Tear down everything hdccom_register_cdev() set up, in strict reverse
 * order of registration: device node, class, cdev, then the chrdev
 * region. The previous code unregistered the region before deleting the
 * cdev, leaving a window where the cdev was still live on freed numbers.
 */
void hdccom_free_cdev(struct hdcdrv_cdev *hcdev)
{
    device_destroy(hcdev->cdev_class, hcdev->dev_no);
    class_destroy(hcdev->cdev_class);
    cdev_del(&hcdev->cdev);
    unregister_chrdev_region(hcdev->dev_no, HDCDRV_CDEV_COUNT);

    hcdev->cdev_class = NULL;
    hcdev->dev = NULL;
}

int hdcdrv_send_mem_info(struct hdcdrv_fast_mem *mem, int devid, int flag)
{
    long ret;
    int i;
    u32 len = 0;
    u32 msg_size;
    struct hdcdrv_ctrl_msg_sync_mem_info *msg = NULL;
    struct mutex *sync_mem_mutex = hdcdrv_get_sync_mem_lock(devid);

    if (!hdcdrv_mem_is_notify(mem)) {
        return HDCDRV_OK;
    }

    mutex_lock(sync_mem_mutex);

    msg_size = sizeof(struct hdcdrv_ctrl_msg_sync_mem_info) + mem->phy_addr_num * sizeof(struct hdcdrv_dma_mem);
    msg = (struct hdcdrv_ctrl_msg_sync_mem_info *)hdcdrv_get_sync_mem_buf(devid);

    msg->error_code = HDCDRV_OK;
    msg->type = HDCDRV_CTRL_MSG_TYPE_SYNC_MEM_INFO;
    msg->flag = flag;
    msg->phy_addr_num = mem->phy_addr_num;
    msg->alloc_len = mem->alloc_len;
    msg->mem_type = mem->mem_type;
    msg->pid = mem->hash_va & HDCDRV_FRBTREE_PID_MASK;
    msg->hash_va = mem->hash_va;

    for (i = 0; i < msg->phy_addr_num; i++) {
        msg->mem[i].addr = mem->mem[i].addr;
        msg->mem[i].len = mem->mem[i].len;
        msg->mem[i].resv = 0;
    }

    ret = hdcdrv_non_trans_ctrl_msg_send(devid, (void *)msg, msg_size, msg_size, &len);
    if ((ret != HDCDRV_OK) || (len != sizeof(struct hdcdrv_ctrl_msg_sync_mem_info))
        || (msg->error_code != HDCDRV_OK)) {
        hdcdrv_err_limit("dev_id %d mem info msg send failed. ret %ld ,len %d error code %d\n",
            devid, ret, len, msg->error_code);
        ret = HDCDRV_SEND_CTRL_MSG_FAIL;
    }

    mutex_unlock(sync_mem_mutex);

    return ret;
}

/*
 * Mark a fast node busy and record the start time (jiffies) so that
 * hdcdrv_node_status_idle() can measure how long it stayed busy.
 * The stamp is written before the status flips to BUSY.
 */
void hdcdrv_node_status_busy(struct hdcdrv_fast_node *node)
{
    node->stamp = jiffies;
    atomic_set(&node->status, HDCDRV_NODE_BUSY);
}

/*
 * Mark a fast node idle. If a busy stamp was recorded, account the busy
 * duration: track the maximum observed cost and emit a rate-limited
 * notice when the duration exceeds the warning threshold.
 */
void hdcdrv_node_status_idle(struct hdcdrv_fast_node *node)
{
    if (node->stamp != 0) {
        u32 busy_ms = jiffies_to_msecs(jiffies - node->stamp);

        if (busy_ms > node->max_cost) {
            node->max_cost = busy_ms;
        }
        if (busy_ms > HDCDRV_NODE_BUSY_WARING) {
            hdcdrv_info_limit("node busy for %u ms, max cost %u ms\n", busy_ms, node->max_cost);
        }
    }

    node->stamp = 0;
    atomic_set(&node->status, HDCDRV_NODE_IDLE);
}

/*
 * Mark idle the fast node that embeds the given fast memory.
 * A NULL f_mem is tolerated and ignored.
 */
void hdcdrv_node_status_idle_by_mem(struct hdcdrv_fast_mem *f_mem)
{
    struct hdcdrv_fast_node *owner = NULL;

    if (f_mem == NULL) {
        return;
    }

    /* fast_mem is embedded in hdcdrv_fast_node; recover the container */
    owner = container_of(f_mem, struct hdcdrv_fast_node, fast_mem);
    hdcdrv_node_status_idle(owner);
}

/* Return true when the node's atomic status is HDCDRV_NODE_BUSY. */
bool hdcdrv_node_is_busy(struct hdcdrv_fast_node *node)
{
    return atomic_read(&node->status) == HDCDRV_NODE_BUSY;
}

/*
 * Return true when the time elapsed since node_stamp exceeds the busy
 * timeout threshold (milliseconds).
 *
 * NOTE(review): node_stamp is an int while jiffies is unsigned long, so
 * the stamp is truncated on 64-bit kernels — presumably callers pass a
 * recent jiffies snapshot; confirm against the call sites.
 */
bool hdcdrv_node_is_timeout(int node_stamp)
{
    return jiffies_to_msecs(jiffies - node_stamp) > HDCDRV_NODE_BUSY_TIMEOUT;
}

/*
 * Bind a fast node to a file context: allocate a list node linking the
 * two and insert it at the head of the context's memory list.
 *
 * Returns HDCDRV_OK on success, HDCDRV_MEM_ALLOC_FAIL on allocation
 * failure. The allocated list node is owned by the context list and is
 * freed by hdcdrv_unbind_mem_ctx() / hdcdrv_release_free_mem().
 */
long hdcdrv_bind_mem_ctx(struct hdcdrv_ctx_fmem *ctx_fmem, struct hdcdrv_fast_node *f_node)
{
    struct hdcdrv_mem_fd_list *new_node = NULL;

    new_node = kzalloc(sizeof(struct hdcdrv_mem_fd_list), GFP_KERNEL | __GFP_ACCOUNT);
    if (new_node == NULL) {
        hdcdrv_err("malloc mem context failed.\n");
        return HDCDRV_MEM_ALLOC_FAIL;
    }

    new_node->f_node = f_node;
    new_node->ctx_fmem = ctx_fmem;

    spin_lock_bh(&ctx_fmem->mem_lock);
    list_add(&new_node->list, &ctx_fmem->mlist.list);
    /* keep the counter consistent with the list; it was previously
     * updated outside the lock, racing against the unbind path */
    ctx_fmem->mem_count++;
    spin_unlock_bh(&ctx_fmem->mem_lock);

    f_node->mem_fd_node = new_node;

    return HDCDRV_OK;
}

/*
 * Undo hdcdrv_bind_mem_ctx(): remove the node's link from its context
 * list, free it, and clear the back-pointer. Safe to call when the node
 * was never bound (mem_fd_node == NULL).
 */
void hdcdrv_unbind_mem_ctx(struct hdcdrv_fast_node *f_node)
{
    struct hdcdrv_mem_fd_list *node = f_node->mem_fd_node;
    struct hdcdrv_ctx_fmem *ctx_fmem = NULL;

    if (node != NULL) {
        ctx_fmem = node->ctx_fmem;

        spin_lock_bh(&ctx_fmem->mem_lock);
        list_del(&node->list);
        /* decrement under the same lock that protects the list; it was
         * previously updated unlocked, racing against the bind path */
        ctx_fmem->mem_count--;
        spin_unlock_bh(&ctx_fmem->mem_lock);

        kfree(node);
    }

    f_node->mem_fd_node = NULL;
}
/*
 * Accumulate one fast-memory block into the per-type statistics:
 * bump the count and add the allocated length for its memory type.
 * Blocks with an out-of-range type are ignored.
 */
void hdcdrv_count_mem_info(const struct hdcdrv_fast_mem *fast_mem, struct hdcdrv_mem_stat *mem_info)
{
    if ((fast_mem->mem_type < 0) || (fast_mem->mem_type >= HDCDRV_FAST_MEM_TYPE_MAX)) {
        return;
    }

    mem_info->mem_nums[fast_mem->mem_type]++;
    mem_info->mem_size[fast_mem->mem_type] += fast_mem->alloc_len;
}

void hdcdrv_release_free_mem(struct hdcdrv_ctx_fmem *ctx_fmem)
{
    struct hdcdrv_mem_fd_list *entry = NULL;
    struct list_head *pos = NULL;
    struct list_head *n = NULL;
    struct hdcdrv_mem_stat mem_info = {{0}};

    /* memory free */
    if (!list_empty_careful(&ctx_fmem->mlist.list)) {
        hdcdrv_info("release memory : task pid %d, count %llu\n", hdcdrv_get_pid(), ctx_fmem->mem_count);

        usleep_range(HDCDRV_USLEEP_RANGE_2000, HDCDRV_USLEEP_RANGE_3000);
        list_for_each_safe(pos, n, &ctx_fmem->mlist.list)
        {
            entry = list_entry(pos, struct hdcdrv_mem_fd_list, list);
            hdcdrv_count_mem_info(&entry->f_node->fast_mem, &mem_info);
            hdcdrv_fast_mem_free_abnormal(entry->f_node);

            spin_lock_bh(&ctx_fmem->mem_lock);
            list_del(&entry->list);
            spin_unlock_bh(&ctx_fmem->mem_lock);

            kfree(entry);
        }
    }
}



