/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2019-2022. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Description:
 * Author: huawei
 * Create: 2019-10-15
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/mutex.h>

#include "ascend_kernel_hal.h"
#include "devmm_proc_info.h"
#include "devmm_common.h"
#include "devmm_addr_mng.h"

/*
 * Look up the mem node fully containing [va, va + len) after expanding the
 * range outward to DEVMM_MEM_NODE_VA_ALIGN boundaries.
 * Caller must hold addr_mng->rbtree_mutex (read or write).
 * Returns the containing node, or NULL if none covers the whole range.
 */
static struct devmm_mem_node *devmm_search_mem_node_no_lock(struct devmm_addr_mng *addr_mng, u64 va, u64 len)
{
    struct devmm_mem_node *found = NULL;
    struct rb_node *cur = addr_mng->rbtree.rb_node;
    u64 start, end;

    /* Align the query range outward to the node granularity. */
    start = round_down(va, DEVMM_MEM_NODE_VA_ALIGN);
    end = start + round_up((va - start) + len, DEVMM_MEM_NODE_VA_ALIGN);

    while (cur != NULL) {
        found = rb_entry(cur, struct devmm_mem_node, node);

        if (end <= found->va) {
            cur = cur->rb_left;
        } else if (start >= (found->va + found->len)) {
            cur = cur->rb_right;
        } else {
            break; /* overlapping node located */
        }
    }

    /*
     * 'found' may still point at the last node probed even when the walk
     * missed; only report a hit when the aligned range is fully contained.
     */
    if ((found != NULL) &&
        ((start < found->va) || (end > (found->va + found->len)))) {
        found = NULL;
    }

    return found;
}

/*
 * Locked wrapper around devmm_search_mem_node_no_lock(): takes the rbtree
 * rwsem shared so concurrent readers can search simultaneously.
 */
struct devmm_mem_node *devmm_search_mem_node(struct devmm_addr_mng *addr_mng, u64 va, u64 len)
{
    struct devmm_mem_node *result;

    down_read(&addr_mng->rbtree_mutex);
    result = devmm_search_mem_node_no_lock(addr_mng, va, len);
    up_read(&addr_mng->rbtree_mutex);

    return result;
}

/*
 * Insert @mem_node into the interval rbtree keyed on [va, va + len).
 * Takes the rbtree rwsem exclusively.
 * Returns 0 on success, -EFAULT when the node overlaps an existing one.
 */
STATIC int devmm_insert_mem_node(struct devmm_addr_mng *addr_mng, struct devmm_mem_node *mem_node)
{
    struct rb_node **link;
    struct rb_node *parent = NULL;
    int ret = 0;

    down_write(&addr_mng->rbtree_mutex);

    /* Standard rbtree descent to find the insertion slot. */
    link = &addr_mng->rbtree.rb_node;
    while (*link != NULL) {
        struct devmm_mem_node *cur = rb_entry(*link, struct devmm_mem_node, node);

        parent = *link;
        if ((mem_node->va + mem_node->len) <= cur->va) {
            link = &parent->rb_left;
        } else if (mem_node->va >= (cur->va + cur->len)) {
            link = &parent->rb_right;
        } else {
            /* Range overlaps an existing node: refuse the insert. */
            ret = -EFAULT;
            break;
        }
    }

    if (ret == 0) {
        /* Link the new node and rebalance. */
        rb_link_node(&mem_node->node, parent, link);
        rb_insert_color(&mem_node->node, &addr_mng->rbtree);
    }

    up_write(&addr_mng->rbtree_mutex);

    return ret;
}

/*
 * Remove @mem_node from the address-management rbtree.
 * Caller must hold addr_mng->rbtree_mutex for write (all call sites in this
 * file take down_write() before calling).
 */
STATIC void devmm_erase_mem_node(struct devmm_addr_mng *addr_mng, struct devmm_mem_node *mem_node)
{
    rb_erase(&mem_node->node, &addr_mng->rbtree);
}

/*
 * Allocate a mem node covering [va, va + len) expanded outward to
 * DEVMM_MEM_NODE_VA_ALIGN boundaries, with one trailing devmm_addr_info
 * slot per @page_size page. The addr_info array is carved from the same
 * allocation, directly after the node header.
 * Returns the zero-initialized node, or NULL on bad input / OOM.
 */
struct devmm_mem_node *devmm_alloc_mem_node(u64 va, u64 len, u32 page_size)
{
    struct devmm_mem_node *mem_node = NULL;
    u64 tmp_va, tmp_len;
    size_t size;

    /* Align the requested range outward to the node granularity. */
    tmp_len = round_up((va - round_down(va, DEVMM_MEM_NODE_VA_ALIGN)) + len, DEVMM_MEM_NODE_VA_ALIGN);
    tmp_va = round_down(va, DEVMM_MEM_NODE_VA_ALIGN);

    /*
     * page_size == 0 would divide by zero below (the old tmp_len < page_size
     * test alone did not catch it); a range smaller than one page holds no
     * addr_info slot at all.
     */
    if ((page_size == 0) || (tmp_len < page_size)) {
        return NULL;
    }

    size = sizeof(struct devmm_mem_node) + sizeof(struct devmm_addr_info) * (tmp_len / page_size);

    mem_node = (struct devmm_mem_node *)devmm_kvzalloc(size);
    if (mem_node == NULL) {
        devmm_drv_err("Kvzalloc failed. (size=%lx)\n", (unsigned long)size);
        return NULL;
    }

    mem_node->va = tmp_va;
    mem_node->len = tmp_len;
    mem_node->page_size = page_size;
    /* addr_info array lives immediately after the node header. */
    mem_node->addr_info = (struct devmm_addr_info *)(mem_node + 1);

    devmm_drv_debug("Vaddress and len alloc memory node. (va=%llx; len=%llx)\n", tmp_va, tmp_len);

    return mem_node;
}

/*
 * Log and release a node previously returned by devmm_alloc_mem_node().
 * The trailing addr_info array is part of the same allocation and is freed
 * along with it.
 */
STATIC void devmm_free_mem_node(struct devmm_mem_node *mem_node)
{
    devmm_drv_debug("Vaddress and len free memory node. (va=%llx; len=%llx).\n", mem_node->va, mem_node->len);
    devmm_kvfree(mem_node);
}

/*
 * Find the node covering [va, va + len), creating and inserting one when it
 * does not exist yet. Insert failure is treated as a lost race against a
 * concurrent creator: the fresh node is dropped and the tree is searched
 * again. Returns NULL only when allocation fails.
 * NOTE(review): @devpid is currently unused here — kept for interface
 * compatibility with callers.
 */
struct devmm_mem_node *devmm_get_mem_node(struct devmm_addr_mng *addr_mng, int devpid, u64 va, u64 len, u32 page_size)
{
    struct devmm_mem_node *node;

    /* Fast path: the covering node already exists. */
    node = devmm_search_mem_node(addr_mng, va, len);
    if (node != NULL) {
        return node;
    }

    node = devmm_alloc_mem_node(va, len, page_size);
    if (node == NULL) {
        return NULL;
    }

    if (devmm_insert_mem_node(addr_mng, node) != 0) {
        /* Another thread inserted an overlapping node first: use theirs. */
        devmm_free_mem_node(node);
        node = devmm_search_mem_node(addr_mng, va, len);
    }

    return node;
}

/*
 * Free @mem_node if it no longer carries any valid page mapping.
 * The valid_page_num check is performed UNDER the write lock: the old code
 * read it before locking, so a concurrent devmm_dma_map_page() could raise
 * the count between the check and the erase, freeing a node with live
 * mappings. Returns 0 when freed, -EFAULT when still in use or NULL.
 */
int devmm_try_free_mem_node(struct devmm_addr_mng *addr_mng, struct devmm_mem_node *mem_node)
{
    if (mem_node == NULL) {
        return -EFAULT;
    }

    down_write(&addr_mng->rbtree_mutex);
    /* Re-check under the lock so no mapper can sneak in before the erase. */
    if (atomic_read(&mem_node->valid_page_num) != 0) {
        up_write(&addr_mng->rbtree_mutex);
        return -EFAULT;
    }
    devmm_erase_mem_node(addr_mng, mem_node);
    up_write(&addr_mng->rbtree_mutex);

    devmm_free_mem_node(mem_node);
    return 0;
}

/*
 * Return the addr_info slot describing the page that contains @va, or NULL
 * when @mem_node is NULL or @va lies outside [va, va + len) of the node.
 */
struct devmm_addr_info *devmm_get_addr_info(struct devmm_mem_node *mem_node, u64 va)
{
    u32 idx;

    if (mem_node == NULL) {
        return NULL;
    }
    /* Reject addresses outside the node's range. */
    if ((va < mem_node->va) || (va >= mem_node->va + mem_node->len)) {
        return NULL;
    }

    /* One slot per page_size page, indexed from the node's base. */
    idx = (u32)((va - mem_node->va) / mem_node->page_size);

    return mem_node->addr_info + idx;
}

/*
 * Publish a caller-prepared mapping record into the node's addr_info slot
 * and account one more valid page on the node.
 */
void devmm_set_dma_addr_to_addr_info(const struct devmm_addr_info *in_addr_info,
    struct devmm_mem_node *mem_node, struct devmm_addr_info *addr_info)
{
    /* Whole-struct copy of the mapping record. */
    addr_info[0] = in_addr_info[0];
    /* One more slot now carries a valid mapping. */
    atomic_inc(&mem_node->valid_page_num);
}

/*
 * DMA-map @page for device @dev_id and record the mapping in @addr_info.
 * A NULL device (host agent case) is legal: the record is stored with a
 * zero dma address and no mapping is performed. Increments the node's
 * valid_page_num when @mem_node is supplied.
 * Returns 0 on success, -EFAULT on bad input or mapping failure.
 */
int devmm_dma_map_page(u32 dev_id, struct page *page, u32 len,
    struct devmm_mem_node *mem_node, struct devmm_addr_info *addr_info)
{
    struct device *dev = NULL;
    dma_addr_t dma_addr = 0;

    if (addr_info == NULL) {
        /* Fixed: message said "Get device faild" — typo and wrong cause. */
        devmm_drv_err("Invalid addr_info. (dev_id=%u; addr_info=%pK)\n", dev_id, addr_info);
        return -EFAULT;
    }

    /* null of host agent dev is normal */
    dev = devmm_devid_to_device(dev_id);
    if (dev != NULL) {
        dma_addr = devdrv_dma_map_page(dev, page, 0, len, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, dma_addr) != 0) {
            devmm_drv_err("Dma map page failed. (dev_id=%u; len=%x)\n", dev_id, len);
            return -EFAULT;
        }
    } else {
        dma_addr = 0;
    }

    addr_info->dev_id = dev_id;
    addr_info->page = page;
    addr_info->len = len;
    addr_info->addr = dma_addr;
    if (mem_node != NULL) {
        atomic_inc(&mem_node->valid_page_num);
    }
    devmm_drv_debug("Dma map page details. (dev_id=%u; len=%x; addr=0x%llx)\n",
        dev_id, len, (u64)addr_info->addr);
    return 0;
}

void devmm_dma_unmap_page(struct devmm_mem_node *mem_node, struct devmm_addr_info *addr_info)
{
    if (addr_info->addr != 0) {
        struct device *dev = devmm_devid_to_device(addr_info->dev_id);
        devmm_drv_debug("Dma unmap page details. (addr=%llx; len=%x)\n", (u64)addr_info->addr, addr_info->len);
        if (dev != NULL && addr_info->len != 0) {
            dma_unmap_page(dev, addr_info->addr, addr_info->len, DMA_BIDIRECTIONAL);
        }
    }
    addr_info->page = NULL;
    addr_info->addr = 0;
    addr_info->len = 0;
    if (mem_node != NULL) {
        atomic_dec(&mem_node->valid_page_num);
    }
}

/*
 * Unmap every page slot of @mem_node, yielding the CPU periodically via
 * devmm_try_cond_resched() so long node teardowns do not hog the core.
 */
STATIC void devmm_dma_unmap_all_page(struct devmm_mem_node *mem_node)
{
    u32 stamp = (u32)jiffies;
    int total = (int)(mem_node->len / mem_node->page_size);
    int idx;

    for (idx = 0; idx < total; idx++) {
        devmm_dma_unmap_page(mem_node, mem_node->addr_info + idx);
        devmm_try_cond_resched(&stamp);
    }
}

/*
 * Tear down every node in the address-management tree: unmap all its pages,
 * warn if any mapping is still accounted as valid, then erase and free the
 * node. Holds the rbtree rwsem exclusively for the whole sweep.
 */
void devmm_addr_mng_free_res(struct devmm_addr_mng *addr_mng)
{
    struct rb_node *cur, *next;
    u32 stamp = (u32)jiffies;

    down_write(&addr_mng->rbtree_mutex);
    for (cur = rb_first(&addr_mng->rbtree); cur != NULL; cur = next) {
        struct devmm_mem_node *mem_node = rb_entry(cur, struct devmm_mem_node, node);

        /* Grab the successor before the current node is erased. */
        next = rb_next(cur);

        devmm_dma_unmap_all_page(mem_node);
        if (atomic_read(&mem_node->valid_page_num) != 0) {
            devmm_drv_warn("Memnode details. (va=0x%llx; len=%llu; valid_page_num=%d)\n",
                mem_node->va, mem_node->len, atomic_read(&mem_node->valid_page_num));
        }

        devmm_erase_mem_node(addr_mng, mem_node);
        devmm_free_mem_node(mem_node);
        devmm_try_cond_resched(&stamp);
    }
    up_write(&addr_mng->rbtree_mutex);
}

/*
 * Free every node whose base va falls inside [start, end): unmap its pages,
 * warn if mappings remain accounted, then erase and free it. The whole tree
 * is walked under the write lock; nodes outside the window are skipped.
 */
void devmm_addr_mng_free_res_by_addr(struct devmm_addr_mng *addr_mng, u64 start, u64 end)
{
    struct rb_node *cur, *next;

    down_write(&addr_mng->rbtree_mutex);
    for (cur = rb_first(&addr_mng->rbtree); cur != NULL; cur = next) {
        struct devmm_mem_node *mem_node = rb_entry(cur, struct devmm_mem_node, node);

        /* Fetch the successor first: the current node may be erased. */
        next = rb_next(cur);

        /* Only nodes whose base address lies inside the window. */
        if ((mem_node->va < start) || (mem_node->va >= end)) {
            continue;
        }

        devmm_dma_unmap_all_page(mem_node);
        if (atomic_read(&mem_node->valid_page_num) != 0) {
            devmm_drv_warn("Memnode details. (va=0x%llx; len=%llu; valid_page_num=%d)\n",
                mem_node->va, mem_node->len, atomic_read(&mem_node->valid_page_num));
        }
        devmm_erase_mem_node(addr_mng, mem_node);
        devmm_free_mem_node(mem_node);
    }
    up_write(&addr_mng->rbtree_mutex);
}

/*
 * Look up the node covering [va, va + len) and take a user reference on it.
 * The user_cnt is bumped while still holding the read lock, so the node
 * cannot be seen as unused before the caller receives it. Pair with
 * devmm_put_addr_mem_node().
 */
struct devmm_mem_node *devmm_get_addr_mem_node(struct devmm_addr_mng *addr_mng, u64 va, u64 len)
{
    struct devmm_mem_node *node;

    down_read(&addr_mng->rbtree_mutex);
    node = devmm_search_mem_node_no_lock(addr_mng, va, len);
    if (node != NULL) {
        atomic_inc(&node->user_cnt);
    }
    up_read(&addr_mng->rbtree_mutex);

    return node;
}

/*
 * Drop the user reference taken by devmm_get_addr_mem_node() on the node
 * covering [va, va + len). A miss is silently ignored; the decrement never
 * pushes user_cnt below zero.
 */
void devmm_put_addr_mem_node(struct devmm_addr_mng *addr_mng, u64 va, u64 len)
{
    struct devmm_mem_node *node;

    down_read(&addr_mng->rbtree_mutex);
    node = devmm_search_mem_node_no_lock(addr_mng, va, len);
    if (node != NULL) {
        (void)atomic_dec_if_positive(&node->user_cnt);
    }
    up_read(&addr_mng->rbtree_mutex);
}

bool devmm_mem_node_is_in_use(struct devmm_mem_node *mem_node)
{
    u32 use_cnt;

    if (mem_node == NULL) {
        return false;
    }
    use_cnt = (u32)atomic_read(&mem_node->user_cnt);
    if (use_cnt != 0) {
        return true;
    }
    return false;
}

/* Signature shared by the per-attribute checkers dispatched from devmm_mem_attr_is_match(). */
typedef bool (*devmm_mem_attr_check_func)(struct devmm_mem_node *mem_node, u64 vaddr, u64 size, u32 page_size);

/*
 * Return true when every page in [vaddr, vaddr + size) has a backing
 * struct page recorded in @mem_node.
 * Bug fix: the old loop advanced tmp_vaddr but always queried the original
 * 'vaddr', so only the FIRST page was ever checked; now each page in the
 * range is inspected.
 */
static bool devmm_mem_is_alloced(struct devmm_mem_node *mem_node, u64 vaddr, u64 size, u32 page_size)
{
    struct devmm_addr_info *addr_info = NULL;
    u64 tmp_vaddr;
    u64 end_addr = vaddr + size;

    for (tmp_vaddr = vaddr; tmp_vaddr < end_addr; tmp_vaddr += page_size) {
        addr_info = devmm_get_addr_info(mem_node, tmp_vaddr);
        if (addr_info == NULL || addr_info->page == NULL) {
            return false;
        }
    }
    return true;
}

/*
 * A range qualifies as read-only only when the node's protection is
 * read-only AND every page in the range is actually backed.
 */
static bool devmm_mem_is_readonly(struct devmm_mem_node *mem_node, u64 vaddr, u64 size, u32 page_size)
{
    if (!devmm_is_readonly_mem(mem_node->page_prot)) {
        return false;
    }

    return devmm_mem_is_alloced(mem_node, vaddr, size, page_size);
}

/*
 * Check whether [va, va + size), rounded outward to @page_size, satisfies
 * the attribute @mem_attr (dispatched through the checker table). Tries a
 * single covering node first, then falls back to checking page by page so
 * the range may span multiple nodes.
 * Fix: @mem_attr is now bounds-checked before indexing the dispatch table —
 * previously an out-of-range attribute read past the array and called a
 * garbage function pointer.
 */
bool devmm_mem_attr_is_match(struct devmm_addr_mng *addr_mng, u64 va, u64 size, u32 page_size, u32 mem_attr)
{
    devmm_mem_attr_check_func mem_attr_check_func[DEVMM_MEM_ATTR_TYPE_MAX] = {
        devmm_mem_is_readonly,
        devmm_mem_is_alloced,
        devmm_mem_is_alloced,
    };
    struct devmm_mem_node *mem_node = NULL;
    u64 vaddr, end_addr, check_size;

    /* Guard the table index against out-of-range attribute values. */
    if (mem_attr >= DEVMM_MEM_ATTR_TYPE_MAX) {
        return false;
    }

    end_addr = round_up(va + size, page_size);
    vaddr = round_down(va, page_size);
    check_size = end_addr - vaddr;

    /* Fast path: one node covers the whole aligned range. */
    mem_node = devmm_search_mem_node(addr_mng, vaddr, check_size);
    if (mem_node != NULL) {
        return (mem_attr_check_func[mem_attr](mem_node, vaddr, check_size, page_size) == true);
    }

    /* Slow path: the range may straddle several nodes; check per page. */
    for (; vaddr < end_addr; vaddr += page_size) {
        mem_node = devmm_search_mem_node(addr_mng, vaddr, page_size);
        if (mem_node == NULL) {
            return false;
        }
        if (mem_attr_check_func[mem_attr](mem_node, vaddr, page_size, page_size) == false) {
            return false;
        }
    }
    return true;
}
