/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2012-2019. All rights reserved.
 * Description: hdc mem src file
 * Author: huawei
 * Create: 2019/6/18
 *
 * This program is free software; you can redistribute it and /or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version
 */
#ifdef CONFIG_GENERIC_BUG
#undef CONFIG_GENERIC_BUG
#endif
#ifdef CONFIG_BUG
#undef CONFIG_BUG
#endif
#ifdef CONFIG_DEBUG_BUGVERBOSE
#undef CONFIG_DEBUG_BUGVERBOSE
#endif
#include <linux/errno.h>
#include <linux/version.h>
#include <linux/jiffies.h>
#include <linux/ioctl.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <asm/uaccess.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/mm.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include <linux/crc32.h>
#include <linux/swap.h>
#include <linux/vmalloc.h>

#include "hdcdrv_core.h"

/* Return the delayed work item used to recycle HDC memory. */
struct delayed_work *hdcdrv_get_recycle_mem(void)
{
    return &hdc_ctrl->recycle_mem;
}

struct device* hdcdrv_get_pdev_dev(int dev_id)
{
    if (hdc_ctrl->devices[dev_id].valid != HDCDRV_VALID) {
        hdcdrv_err("device %d is not ready.\n", dev_id);
        return NULL;
    }
    return hdc_ctrl->devices[dev_id].dev;
}

/*
 * Fast-memory container selection rules ("uni" = unified ctrl-level fmem,
 * "sep" = per-device fmem; lo/re = local/remote rb-trees):
 * 1 fast alloc/free/map          -> uni -> ctrl local tree
 * 2 add/del                      -> sep -> ctrl.dev[id] local/remote trees
 * 3 send/rx src & pm (fid == 0)  -> uni -> ctrl local tree
 * 4 send/rx/dma-dst              -> sep -> ctrl.dev[id] local/remote trees
 */
/* Return the unified (ctrl-level) fast-memory container. */
struct hdcdrv_dev_fmem *hdcdrv_get_dev_fmem_uni(void)
{
    return &hdc_ctrl->fmem;
}

/* Return the per-device fast-memory container for devid. */
struct hdcdrv_dev_fmem *hdcdrv_get_dev_fmem_sep(int devid)
{
    return &hdc_ctrl->devices[devid].fmem;
}

/*
 * Pick the fast-memory container: the local side with fid == 0 uses the
 * unified ctrl-level container; every other combination uses the
 * per-device container.
 */
struct hdcdrv_dev_fmem *hdcdrv_get_dev_fmem_ex(int devid, u32 fid, u32 side)
{
    if ((side == HDCDRV_RBTREE_SIDE_LOCAL) && (fid == 0)) {
        return &hdc_ctrl->fmem;
    }

    return &hdc_ctrl->devices[devid].fmem;
}

/* Select the local or remote rb-tree root of a fast-memory container. */
struct rb_root* hdcdrv_get_rbtree(struct hdcdrv_dev_fmem *dev_fmem, u32 side)
{
    return (side == HDCDRV_RBTREE_SIDE_LOCAL) ? &dev_fmem->rbtree : &dev_fmem->rbtree_re;
}

/*
 * Choose the memory pool for a payload of data_len bytes: payloads that fit
 * in a small segment (after the block head is reserved) use the small pool,
 * everything else uses the huge pool.
 */
struct hdcdrv_mem_pool *get_pool(struct hdcdrv_dev *dev, int pool_type, u32 data_len)
{
    u32 small_capacity = HDCDRV_SMALL_PACKET_SEGMENT - HDCDRV_MEM_BLOCK_HEAD_SIZE;

    if (data_len > small_capacity) {
        return &dev->huge_mem_pool[pool_type];
    }

    return &dev->small_mem_pool[pool_type];
}

/*
 * Choose the memory pool by raw segment length: small-segment pools serve
 * seg_len up to HDCDRV_SMALL_PACKET_SEGMENT, larger segments go to the
 * huge pools.
 */
struct hdcdrv_mem_pool *get_pool_by_segment(struct hdcdrv_dev *dev, int pool_type, u32 seg_len)
{
    if (seg_len > HDCDRV_SMALL_PACKET_SEGMENT) {
        return &dev->huge_mem_pool[pool_type];
    }

    return &dev->small_mem_pool[pool_type];
}

/*
 * Check whether the pool has free blocks; when it is empty, optionally queue
 * wait_head on the pool's wait list so the caller is notified on free.
 * Returns HDCDRV_OK when blocks are available, HDCDRV_DMA_MEM_ALLOC_FAIL
 * otherwise.
 */
int alloc_mem_later(struct hdcdrv_mem_pool *pool, struct list_head *wait_head)
{
    int rc = HDCDRV_OK;

    spin_lock_bh(&pool->mem_lock);
    if (pool->head == pool->tail) {
        rc = HDCDRV_DMA_MEM_ALLOC_FAIL;
        /* enqueue the waiter only once; next == NULL marks a detached node */
        if ((wait_head != NULL) && (wait_head->next == NULL)) {
            list_add_tail(wait_head, &pool->wait_list);
        }
    }
    spin_unlock_bh(&pool->mem_lock);

    return rc;
}

/*
 * Allocate a DMA memory block of at least len bytes from the device's pool.
 * On pool exhaustion, wait_head (if non-NULL) is queued for a free
 * notification and HDCDRV_DMA_MEM_ALLOC_FAIL is returned.
 * Returns HDCDRV_OK on success; *buf / *addr receive the block.
 */
int alloc_mem(int pool_type, int dev_id, int len, void **buf, dma_addr_t *addr, struct list_head *wait_head)
{
    struct hdcdrv_dev *dev = &hdc_ctrl->devices[dev_id];
    struct hdcdrv_mem_pool *pool = NULL;
    int ret;

    if (dev->valid != HDCDRV_VALID) {
        hdcdrv_err("dev id %d is invalid\n", dev_id);
        return HDCDRV_DEVICE_NOT_READY;
    }

    /* select the pool only after the device is known to be valid */
    pool = get_pool(dev, pool_type, len);

    ret = alloc_mem_later(pool, wait_head);
    if (ret != HDCDRV_OK) {
        return ret;
    }

    ret = hdccom_alloc_mem(pool, buf, addr);
    if (ret != HDCDRV_OK) {
        hdcdrv_err("pool_type %d device %d hdccom_alloc_mem failed, ret = %d\n", pool_type, dev_id, ret);
        return ret;
    }

    return HDCDRV_OK;
}

/*
 * Notify a waiter that pool memory became available. Only RX pools carry
 * waiters (message channels); the channel's rx tasklet is rescheduled.
 */
void hdcdrv_mem_avail(int pool_type, struct list_head *target)
{
    struct hdcdrv_msg_chan *msg_chan = NULL;

    if (pool_type != HDCDRV_MEM_POOL_TYPE_RX) {
        return;
    }

    msg_chan = list_entry(target, struct hdcdrv_msg_chan, wait_mem_list);
    msg_chan->dbg_stat.hdcdrv_mem_avail1++;
    tasklet_schedule(&msg_chan->rx_notify_task);
}

/*
 * Wake up the first waiter queued on the pool (if any) after a block was
 * returned to it.
 */
void free_mem_notify(struct hdcdrv_mem_pool *pool, u32 type)
{
    struct list_head *waiter = NULL;

    spin_lock_bh(&pool->mem_lock);
    if (!list_empty_careful(&pool->wait_list)) {
        waiter = pool->wait_list.next;
        list_del(waiter);
        /* reset pointers so alloc_mem_later can detect and re-queue the node */
        waiter->next = NULL;
        waiter->prev = NULL;
        hdcdrv_mem_avail(type, waiter);
    }
    spin_unlock_bh(&pool->mem_lock);
}

/*
 * Return a block to its owning pool. The owning device/pool is recovered
 * from the block head stored in front of buf; any waiter on the pool is
 * then notified.
 */
void free_mem(void *buf)
{
    struct hdcdrv_mem_block_head *block_head = NULL;
    struct hdcdrv_mem_pool *pool = NULL;
    int ret;

    if (hdcdrv_mem_block_head_check(buf) != HDCDRV_OK) {
        hdcdrv_err("block head check failed\n");
        return;
    }

    block_head = HDCDRV_BLOCK_HEAD(buf);

    pool = get_pool(&hdc_ctrl->devices[block_head->devid], block_head->type, block_head->size);
    if (pool->ring == NULL) {
        hdcdrv_warn("pool ring has freed\n");
        return;
    }

    ret = hdccom_free_mem(pool, buf);
    if (ret != HDCDRV_OK) {
        /* fixed copy-paste log: this is the free path, not alloc */
        hdcdrv_err("pool_type %d device %d hdccom_free_mem failed, ret = %d\n",
            block_head->type, block_head->devid, ret);
        return;
    }

    free_mem_notify(pool, block_head->type);
    return;
}

/*
 * Initialize one memory pool (small or huge, chosen by segment size) for a
 * device with num blocks of segment bytes each.
 * Returns HDCDRV_OK or the error from hdccom_init_mem_pool.
 */
int alloc_mem_pool(int pool_type, int dev_id, u32 segment, u32 num)
{
    struct hdcdrv_dev *hdc_dev = &hdc_ctrl->devices[dev_id];
    struct hdcdrv_mem_pool *pool;
    struct hdccom_mem_init init_mem;
    int ret;

    pool = get_pool_by_segment(hdc_dev, pool_type, segment);

    init_mem.dev = hdc_dev->dev;
    init_mem.pool_type = pool_type;
    init_mem.dev_id = dev_id;
    init_mem.segment = segment;
    init_mem.num = num;

    ret = hdccom_init_mem_pool(pool, &init_mem);
    if (ret != HDCDRV_OK) {
        /* fixed log typo: "poll" -> "pool" */
        hdcdrv_err("dev_id %d pool_type = %d mem pool init failed\n", dev_id, pool_type);
        return ret;
    }

    hdcdrv_info("dev_id %d, segment %u, mem pool real size is %u\n", dev_id, segment, pool->size);
    return HDCDRV_OK;
}

/* Release one memory pool (selected by segment size) of a device. */
void free_mem_pool(int pool_type, int dev_id, u32 segment)
{
    struct hdcdrv_dev *hdc_dev = &hdc_ctrl->devices[dev_id];
    struct hdcdrv_mem_pool *pool = get_pool_by_segment(hdc_dev, pool_type, segment);
    int ret;

    ret = hdccom_free_mem_pool(pool, hdc_dev->dev, segment);
    if (ret != HDCDRV_OK) {
        hdcdrv_err("dev_id %d pool_type = %d hdccom_free_mem_pool failed. ret = %d\n", dev_id, pool_type, ret);
    }
}

/* Usable payload bytes of a huge-pool block: segment minus the block head. */
int hdcdrv_mem_block_capacity(void)
{
    return (int)(hdc_ctrl->segment - HDCDRV_MEM_BLOCK_HEAD_SIZE);
}

/*
 * Create the four per-device pools (small/huge x TX/RX). The *_numdep
 * parameters are log2 of the block count for the small and huge pools.
 *
 * BUGFIX: the original goto cleanup labels were off by one — each failure
 * path first freed the pool that had just FAILED to allocate (e.g. a
 * small-TX failure jumped to FREE_SMALL_TX and freed the never-created
 * small TX pool). Unwind now frees only the pools allocated so far.
 *
 * Returns HDCDRV_OK, HDCDRV_PARA_ERR on bad depth, or HDCDRV_ERR.
 */
int hdcdrv_init_mem_pool(u32 dev_id, int small_packet_numdep, int huge_packet_numdep)
{
    int ret;

    if ((small_packet_numdep < HDCDRV_SMALL_PACKET_NUMDEP_MIN) ||
        (small_packet_numdep > HDCDRV_SMALL_PACKET_NUMDEP_MAX) ||
        (huge_packet_numdep < HDCDRV_HUGE_PACKET_NUMDEP_MIN) ||
        (huge_packet_numdep > HDCDRV_HUGE_PACKET_NUMDEP_MAX)) {
        hdcdrv_err("huge_packet_numdep %d, small_packet_numdep %d, invalid param\n",
            huge_packet_numdep, small_packet_numdep);
        return HDCDRV_PARA_ERR;
    }

    ret = alloc_mem_pool(HDCDRV_MEM_POOL_TYPE_TX, dev_id,
                         HDCDRV_SMALL_PACKET_SEGMENT, (0x1 << small_packet_numdep));
    if (ret != HDCDRV_OK) {
        return HDCDRV_ERR;        /* nothing allocated yet */
    }

    ret = alloc_mem_pool(HDCDRV_MEM_POOL_TYPE_RX, dev_id,
                         HDCDRV_SMALL_PACKET_SEGMENT, (0x1 << small_packet_numdep));
    if (ret != HDCDRV_OK) {
        goto FREE_SMALL_TX;
    }

    ret = alloc_mem_pool(HDCDRV_MEM_POOL_TYPE_TX, dev_id,
                         hdc_ctrl->segment, (0x1 << huge_packet_numdep));
    if (ret != HDCDRV_OK) {
        goto FREE_SMALL_RX;
    }

    ret = alloc_mem_pool(HDCDRV_MEM_POOL_TYPE_RX, dev_id,
                         hdc_ctrl->segment, (0x1 << huge_packet_numdep));
    if (ret != HDCDRV_OK) {
        goto FREE_HUGE_TX;
    }

    return HDCDRV_OK;

/* unwind in reverse allocation order; labels fall through intentionally */
FREE_HUGE_TX:
    free_mem_pool(HDCDRV_MEM_POOL_TYPE_TX, dev_id, hdc_ctrl->segment);
FREE_SMALL_RX:
    free_mem_pool(HDCDRV_MEM_POOL_TYPE_RX, dev_id, HDCDRV_SMALL_PACKET_SEGMENT);
FREE_SMALL_TX:
    free_mem_pool(HDCDRV_MEM_POOL_TYPE_TX, dev_id, HDCDRV_SMALL_PACKET_SEGMENT);

    return HDCDRV_ERR;
}

/* Tear down all four per-device pools, huge pools first. */
void hdcdrv_uninit_mem_pool(u32 dev_id)
{
    u32 huge_segment = hdc_ctrl->segment;

    free_mem_pool(HDCDRV_MEM_POOL_TYPE_RX, dev_id, huge_segment);
    free_mem_pool(HDCDRV_MEM_POOL_TYPE_TX, dev_id, huge_segment);
    free_mem_pool(HDCDRV_MEM_POOL_TYPE_RX, dev_id, HDCDRV_SMALL_PACKET_SEGMENT);
    free_mem_pool(HDCDRV_MEM_POOL_TYPE_TX, dev_id, HDCDRV_SMALL_PACKET_SEGMENT);
}

long hdcdrv_fast_alloc_mem(struct hdcdrv_ctx *ctx, struct hdcdrv_cmd_alloc_mem *cmd)
{
    long ret;
    struct hdcdrv_fast_node *f_node = NULL;

    ret = hdccom_fast_alloc_mem(cmd, &f_node);
    if ((ret != HDCDRV_OK) || (f_node == NULL)) {
        hdcdrv_err("dev %d alloc mem err\n", cmd->dev_id);
        return ret;
    }

    if ((ctx != NULL) && (ctx != HDCDRV_KERNEL_WITHOUT_CTX)) {
        ret = hdcdrv_bind_mem_ctx(&ctx->ctx_fmem, f_node);
        if (ret != HDCDRV_OK) {
            hdcdrv_err("dev %d fast memory bind ctx failed!\n", cmd->dev_id);
            hdcdrv_fast_unalloc_mem(cmd, f_node);
            return ret;
        }
    }

    return HDCDRV_OK;
}

/* RX data/ctrl fast memory needs no notify; all other types do. */
bool hdcdrv_mem_is_notify(struct hdcdrv_fast_mem *f_mem)
{
    switch (f_mem->mem_type) {
    case HDCDRV_FAST_MEM_TYPE_RX_DATA:
    case HDCDRV_FAST_MEM_TYPE_RX_CTRL:
        return false;
    default:
        return true;
    }
}

