/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Description:
 * Author: huawei
 * Create: 2023-08-21
 */

#include "devmm_adapt.h"
#include "devmm_channel.h"
#include "devmm_common.h"
#include "svm_heap_mng.h"
#include "devmm_page_cache.h"
#include "svm_msg_client.h"
#include "svm_phy_addr_blk_mng.h"
#include "svm_vmma_mng.h"
#include "svm_master_mem_map.h"

#define DEVMM_MEM_MAPPED_INVALID_DEVID (DEVMM_MAX_DEVICE_NUM + 1)

/* Cannot be mapped to multiple locations. */
/*
 * A VA range can only be mem-mapped by one side/device: the first successful
 * call stamps the owning devid into the first page bitmap; later calls are
 * accepted only when they come from the same owner.
 */
static int devmm_page_bitmap_mem_mapped_state_set(u32 *page_bitmap, u32 logic_devid, bool *is_first_map)
{
    int ret = 0;

    devmm_page_bitmap_lock(page_bitmap);
    if (!devmm_page_bitmap_is_mem_mapped(page_bitmap)) {
        /* First map: record the mapped state and the owning device id. */
        devmm_page_bitmap_set_flag_without_lock(page_bitmap, DEVMM_PAGE_MEM_MAPPED_MASK);
        devmm_page_bitmap_set_value_nolock(page_bitmap, DEVMM_PAGE_DEVID_SHIT, DEVMM_PAGE_DEVID_WID, logic_devid);
        *is_first_map = true;
    } else if (logic_devid == devmm_page_bitmap_get_devid(page_bitmap)) {
        /* Already mapped by the same owner: allowed, but not the first map. */
        *is_first_map = false;
    } else {
        devmm_drv_err("Could only mem mapped by one side. (devid=%u)\n", devmm_page_bitmap_get_devid(page_bitmap));
        ret = -EINVAL;
    }
    devmm_page_bitmap_unlock(page_bitmap);

    return ret;
}

/*
 * Mark every chunk page of [para->va, para->va + para->size) as mapped for the
 * requested side, after claiming the map-side ownership in the first page
 * bitmap. On any per-page conflict the already-set pages are rolled back.
 *
 * *is_first_map reports whether this call established the map-side ownership;
 * the caller must pass it back to devmm_mem_map_pg_bitmap_state_clear() when
 * undoing a failed map.
 *
 * Returns 0 on success, -EINVAL on ownership conflict / bitmap lookup failure,
 * -EADDRINUSE when a page in the range is already mapped.
 */
static int devmm_mem_map_pg_bitmap_state_set(struct devmm_svm_heap *heap,
    struct devmm_mem_map_para *para, u32 logic_devid, bool *is_first_map)
{
    u32 *first_page_bitmap = NULL;
    u32 *page_bitmap = NULL;
    u64 pg_cnt = para->size / heap->chunk_page_size;
    u64 i, j;
    u32 flag, tmp_devid;
    int ret;

    flag = (para->side == MEM_HOST_SIDE) ?
        (DEVMM_PAGE_HOST_MAPPED_MASK | DEVMM_PAGE_LOCKED_HOST_MASK) :
        (DEVMM_PAGE_DEV_MAPPED_MASK | DEVMM_PAGE_LOCKED_DEVICE_MASK);

    /* Host-side maps are not tied to a device; use the invalid devid marker. */
    tmp_devid = (para->side == MEM_HOST_SIDE) ? DEVMM_MEM_MAPPED_INVALID_DEVID : logic_devid;

    first_page_bitmap = devmm_get_alloced_va_fst_page_bitmap_with_heap(heap, para->va);
    if (unlikely(first_page_bitmap == NULL)) {
        devmm_drv_err("Unexpected, get first pg_bitmap failed. (va=0x%llx)\n", para->va);
        return -EINVAL;
    }

    page_bitmap = devmm_get_page_bitmap_with_heap(heap, para->va);
    if (unlikely(page_bitmap == NULL)) {
        devmm_drv_err("Unexpected, get pg_bitmap failed. (va=0x%llx)\n", para->va);
        return -EINVAL;
    }

    ret = devmm_page_bitmap_mem_mapped_state_set(first_page_bitmap, tmp_devid, is_first_map);
    if (ret != 0) {
        return ret;
    }

    for (i = 0; i < pg_cnt; i++) {
        ret = devmm_page_bitmap_check_and_set_flag(page_bitmap + i, flag | DEVMM_PAGE_ADVISE_POPULATE_MASK);
        if (ret != 0) {
            devmm_drv_err("Already maped. (already_maped=%llu; va=0x%llx; page_cnt=%llu)\n",
                i, para->va, pg_cnt);
            ret = -EADDRINUSE;
            goto clear_bitmap;
        }
        devmm_page_bitmap_set_devid(page_bitmap + i, tmp_devid);
    }

    return 0;
clear_bitmap:
    /* Undo exactly what the loop set, populate-advise bit included
     * (mirrors devmm_mem_map_pg_bitmap_state_clear). */
    for (j = 0; j < i; j++) {
        devmm_page_bitmap_clear_flag(page_bitmap + j, flag | DEVMM_PAGE_ADVISE_POPULATE_MASK);
    }
    /* Release map-side ownership only if THIS call claimed it; testing the
     * pointer here (old code) always evaluated true and wiped the owner. */
    if (*is_first_map) {
        devmm_page_bitmap_clear_flag(first_page_bitmap, DEVMM_PAGE_MEM_MAPPED_MASK);
    }
    return ret;
}

/*
 * Clear the per-page mapped/locked/populate bits for [va, va + size), and
 * optionally release the map-side ownership recorded in the first page bitmap.
 */
static int devmm_mem_map_pg_bitmap_state_clear(struct devmm_svm_heap *heap,
    u64 va, u64 size, u32 side, bool clear_first_bitmap_state)
{
    u64 page_num = size / heap->chunk_page_size;
    u32 *first_page_bitmap = NULL;
    u32 *page_bitmap = NULL;
    u32 clear_mask;
    u64 idx;

    clear_mask = (side == MEM_HOST_SIDE) ?
        (DEVMM_PAGE_HOST_MAPPED_MASK | DEVMM_PAGE_LOCKED_HOST_MASK) :
        (DEVMM_PAGE_DEV_MAPPED_MASK | DEVMM_PAGE_LOCKED_DEVICE_MASK);

    first_page_bitmap = devmm_get_alloced_va_fst_page_bitmap_with_heap(heap, va);
    if (unlikely(first_page_bitmap == NULL)) {
        devmm_drv_err("Unexpected, get first pg_bitmap failed. (va=0x%llx)\n", va);
        return -EINVAL;
    }

    page_bitmap = devmm_get_page_bitmap_with_heap(heap, va);
    if (unlikely(page_bitmap == NULL)) {
        devmm_drv_err("Unexpected, get pg_bitmap failed. (va=0x%llx)\n", va);
        return -EINVAL;
    }

    for (idx = 0; idx < page_num; idx++) {
        devmm_page_bitmap_clear_flag(page_bitmap + idx, clear_mask | DEVMM_PAGE_ADVISE_POPULATE_MASK);
    }

    if (clear_first_bitmap_state) {
        devmm_page_bitmap_clear_flag(first_page_bitmap, DEVMM_PAGE_MEM_MAPPED_MASK);
    }
    return 0;
}

int devmm_msg_to_agent_mem_unmap(struct devmm_svm_process *svm_proc, struct devmm_vmma_info *info)
{
    struct devmm_chan_mem_unmap msg = {{{0}}};
    u64 freed_num, tmp_num;
    int ret;

    msg.head.msg_id = DEVMM_CHAN_MEM_UNMAP_H2D_ID;
    msg.head.process_id.hostpid = svm_proc->process_id.hostpid;
    msg.head.process_id.vfid = (u16)info->vfid;
    msg.head.dev_id = (u16)info->devid;

    for (freed_num = 0; freed_num < info->pg_num; freed_num += tmp_num) {
        msg.va = info->va + freed_num * info->pg_size;
        ret = devmm_chan_msg_send(&msg, sizeof(struct devmm_chan_mem_unmap), sizeof(struct devmm_chan_mem_unmap));
        if (ret != 0) {
            devmm_drv_err("Send mem unmap msg failed. (ret=%d; va=0x%llx; freed_num=%llu)\n", ret, msg.va, freed_num);
            return ret;
        }
        /* Every agent map msg will create vmma, so map per page num should equal with unmap */
        tmp_num = min((info->pg_num - freed_num), DEVMM_MEM_MAP_MAX_PAGE_NUM_PER_MSG);
        devmm_free_pages_cache(svm_proc, info->logic_devid, (u32)tmp_num, info->pg_size, msg.va, true);
    }

    return 0;
}

/* Tear down a mapping: host side is zapped locally, device side via agent msg. */
static int _devmm_ioctl_mem_unmap(struct devmm_svm_process *svm_proc, struct devmm_vmma_info *info)
{
    if (info->side != MEM_HOST_SIDE) {
        return devmm_msg_to_agent_mem_unmap(svm_proc, info);
    }

    devmm_zap_normal_pages(svm_proc, info->va, info->pg_num);
    return 0;
}

/*
 * Reject an unmap while any page of [va, va + size) is still in translation.
 * Returns 0 when the range is quiescent, -EBUSY while translating, -EINVAL on
 * bitmap lookup failure.
 */
static int devmm_mem_unmap_check_bitmap_state(struct devmm_svm_process *svm_proc,
    struct devmm_svm_heap *heap, u64 va, u64 size)
{
    u32 *bitmap = devmm_get_page_bitmap_with_heap(heap, va);
    u64 page_num = size / heap->chunk_page_size;

    if (unlikely(bitmap == NULL)) {
        devmm_drv_err("Unexpected, get pg_bitmap failed. (va=0x%llx)\n", va);
        return -EINVAL;
    }

    if (devmm_check_is_translate(svm_proc, va, bitmap, heap->chunk_page_size, page_num)) {
        return -EBUSY;
    }
    return 0;
}

/*
 * Ioctl entry: unmap a previously mem-mapped VA range.
 *
 * Flow: resolve heap and page bitmap for para->va, look up the vmma (takes a
 * reference), require va to be the vmma head address, take the vmma exclusive
 * state, verify no page is mid-translation, free shared pages, perform the
 * side-specific unmap, clear per-page bitmap state and destroy the vmma.
 *
 * Returns 0 on success or a negative errno.
 */
int devmm_ioctl_mem_unmap(struct devmm_svm_process *svm_proc, struct devmm_ioctl_arg *arg)
{
    struct devmm_mem_unmap_para *para = &arg->data.mem_unmap_para;
    struct devmm_vmma_struct *vmma = NULL;
    struct devmm_svm_heap *heap = NULL;
    u32 *page_bitmap = NULL;
    int ret;

    heap = devmm_svm_get_heap(svm_proc, para->va);
    if (heap == NULL) {
        devmm_drv_err("Is idle addr. (va=0x%llx)\n", para->va);
        return -EINVAL;
    }

    page_bitmap = devmm_get_page_bitmap_with_heap(heap, para->va);
    if (unlikely(page_bitmap == NULL)) {
        devmm_drv_err("Unexpected, get pg_bitmap failed. (va=0x%llx)\n", para->va);
        return -EINVAL;
    }

    /* Takes a reference on the vmma; released via devmm_vmma_put below. */
    vmma = devmm_vmma_get(&heap->vmma_mng, para->va);
    if (vmma == NULL) {
        devmm_drv_err("Get vmma failed. (va=0x%llx)\n", para->va);
        return -EINVAL;
    }

    /* Unmap must target the exact start address of the mapping. */
    if (para->va != vmma->info.va) {
        devmm_drv_err("Va isn't head addr. (va=0x%llx; vmma->va=0x%llx)\n", para->va, vmma->info.va);
        devmm_vmma_put(vmma);
        return -EINVAL;
    }

    /* Block concurrent map/unmap on this vmma while we tear it down. */
    ret = devmm_vmma_exclusive_set(vmma);
    if (ret != 0) {
        devmm_vmma_put(vmma);
        return ret;
    }

    /* -EBUSY while any page in the range is still being translated. */
    ret = devmm_mem_unmap_check_bitmap_state(svm_proc, heap, vmma->info.va, vmma->info.size);
    if (ret != 0) {
        goto exclusive_clear;
    }

    devmm_svm_free_share_page_msg(svm_proc, heap, vmma->info.va, vmma->info.size, page_bitmap);
    ret = _devmm_ioctl_mem_unmap(svm_proc, &vmma->info);
    if (ret != 0) {
        goto exclusive_clear;
    }

    /* Don't clear the first bitmap status, that means the map_side is determined after the first success map. */
    devmm_mem_map_pg_bitmap_state_clear(heap, vmma->info.va, vmma->info.size, vmma->info.side, false);
    devmm_vmma_destroy(&heap->vmma_mng, vmma);

exclusive_clear:
    /* NOTE(review): exclusive_clear/put run after vmma_destroy on the success
     * path — presumably destroy only detaches from vmma_mng and the held
     * reference keeps the object alive until put; verify in svm_vmma_mng. */
    devmm_vmma_exclusive_clear(vmma);
    devmm_vmma_put(vmma);
    return ret;
}

/*
 * Validate the user-supplied map parameters against the target heap:
 * heap sub-type, non-zero page-aligned va/size, range within heap and within
 * the allocation at va, a valid side, and every page actually allocated.
 * Returns 0 when the request is acceptable, otherwise a negative errno.
 */
static int devmm_ioctl_mem_map_para_check(struct devmm_svm_heap *heap, struct devmm_mem_map_para *para)
{
    u64 alloced_size;
    u64 page_num;
    u64 idx;
    u32 *bitmap = NULL;
    int ret;

    if (heap->heap_sub_type != SUB_RESERVE_TYPE) {
        devmm_drv_err("Heap sub type could only be reserve type. (va=0x%llx; heap_sub_type=%u)\n",
            para->va, heap->heap_sub_type);
        return -EINVAL;
    }

    if (para->size == 0) {
        devmm_drv_err("Size is zero.\n");
        return -EINVAL;
    }

    if (!IS_ALIGNED(para->va, heap->chunk_page_size)) {
        devmm_drv_err("Va should be aligned by pg_size. (pg_size=%u)\n", heap->chunk_page_size);
        return -EINVAL;
    }

    if (!IS_ALIGNED(para->size, heap->chunk_page_size)) {
        devmm_drv_err("Size should be aligned by pg_size. (size=%llu; pg_size=%u)\n",
            para->size, heap->chunk_page_size);
        return -EINVAL;
    }

    ret = devmm_check_va_add_size_by_heap(heap, para->va, para->size);
    if (ret != 0) {
        devmm_drv_err("Addr out of heap range. (va=0x%llx; size=%llu)\n", para->va, para->size);
        return ret;
    }

    if (para->side >= MEM_MAX_SIDE) {
        devmm_drv_err("Invalid side. (side=%u)\n", para->side);
        return -EINVAL;
    }

    /* The map may not extend past what was reserved/allocated at va. */
    alloced_size = devmm_get_alloced_size_from_va(heap, para->va);
    if (para->size > alloced_size) {
        devmm_drv_err("Size out of range. (size=%llu; alloced_size=%llu)\n", para->size, alloced_size);
        return -EINVAL;
    }

    bitmap = devmm_get_page_bitmap_with_heap(heap, para->va);
    if (bitmap == NULL) {
        devmm_drv_err("Unexpected, get pg_bitmap failed. (va=0x%llx)\n", para->va);
        return -EINVAL;
    }

    /* Every chunk page in the range must have been allocated. */
    page_num = para->size / heap->chunk_page_size;
    for (idx = 0; idx < page_num; idx++) {
        if (!devmm_page_bitmap_is_page_available(bitmap + idx)) {
            devmm_drv_err("Addr hasn't been alloced. (va=0x%llx; size=%llu)\n", para->va, para->size);
            return -EINVAL;
        }
    }

    return 0;
}

/* Host-side (master) map: thin wrapper around devmm_mem_map for symmetry with
 * the agent path in _devmm_ioctl_mem_map. */
static int devmm_master_mem_map(struct devmm_svm_process *svm_proc, struct devmm_vmma_info *info)
{
    return devmm_mem_map(svm_proc, info);
}

/* Dispatch the map by side: host maps locally, device maps via the agent. */
static int _devmm_ioctl_mem_map(struct devmm_svm_process *svm_proc, struct devmm_vmma_info *info)
{
    return (info->side == MEM_HOST_SIDE) ?
        devmm_master_mem_map(svm_proc, info) :
        devmm_msg_to_agent_mem_map(svm_proc, info);
}

/*
 * Assemble the vmma_info that drives the actual map: page geometry from the
 * heap type, range/side from user parameters, target device identity, and the
 * physical address block description.
 */
static void devmm_ioctl_vmma_info_pack(struct devmm_svm_heap *heap, struct devmm_devid *devids,
    struct devmm_mem_map_para *para, struct devmm_vmma_info *info)
{
    /* Page geometry: huge-page heaps use the device huge page size; normal
     * heaps use the host PAGE_SIZE when mapping host-side. */
    if (heap->heap_type == DEVMM_HEAP_HUGE_PAGE) {
        info->pg_type = MEM_HUGE_PAGE_TYPE;
        info->pg_size = devmm_svm->device_hpage_size;
    } else {
        info->pg_type = MEM_NORMAL_PAGE_TYPE;
        info->pg_size = (para->side == MEM_HOST_SIDE) ? PAGE_SIZE : devmm_svm->device_page_size;
    }

    /* Range and side requested by the caller. */
    info->va = para->va;
    info->size = para->size;
    info->side = para->side;
    info->pg_num = info->size / info->pg_size;

    /* Identity of the target device. */
    info->devid = devids->devid;
    info->logic_devid = devids->logical_devid;
    info->vfid = devids->vfid;

    /* Physical address block backing this mapping. */
    info->phy_addr_blk_id = para->id;
    info->phy_addr_blk_pg_num = para->pg_num;
    info->offset_pg_num = 0;
    info->module_id = para->module_id;
}

/*
 * Ioctl entry: map a reserved VA range to physical memory.
 *
 * Flow: resolve heap, validate parameters, claim per-page bitmap state (also
 * learning whether this is the first map of the range), pack the vmma_info,
 * perform the side-specific map, and finally create the vmma bookkeeping.
 * Each later step unwinds the earlier ones on failure, in reverse order.
 *
 * Returns 0 on success or a negative errno.
 */
int devmm_ioctl_mem_map(struct devmm_svm_process *svm_proc, struct devmm_ioctl_arg *arg)
{
    struct devmm_mem_map_para *para = &arg->data.mem_map_para;
    struct devmm_vmma_info info = {0};
    struct devmm_svm_heap *heap = NULL;
    bool is_first_map;
    int ret;

    heap = devmm_svm_get_heap(svm_proc, para->va);
    if (heap == NULL) {
        devmm_drv_err("Invalid addr, get heap failed. (va=0x%llx)\n", para->va);
        return -EINVAL;
    }

    ret = devmm_ioctl_mem_map_para_check(heap, para);
    if (ret != 0) {
        return ret;
    }

    /* Sets mapped/locked bits per page; is_first_map is assigned on success. */
    ret = devmm_mem_map_pg_bitmap_state_set(heap, para, arg->head.logical_devid, &is_first_map);
    if (ret != 0) {
        return ret;
    }

    devmm_ioctl_vmma_info_pack(heap, &arg->head, para, &info);
    ret = _devmm_ioctl_mem_map(svm_proc, &info);
    if (ret != 0) {
        /* Map failed: roll back bitmap state; release map-side ownership
         * only if this call was the first map of the range. */
        devmm_mem_map_pg_bitmap_state_clear(heap, para->va, para->size, para->side, is_first_map);
        return ret;
    }

    ret = devmm_vmma_create(&heap->vmma_mng, &info);
    if (ret != 0) {
        /* Bookkeeping failed: undo the map itself, then the bitmap state. */
        _devmm_ioctl_mem_unmap(svm_proc, &info);
        devmm_mem_map_pg_bitmap_state_clear(heap, para->va, para->size, para->side, is_first_map);
        return ret;
    }

    return 0;
}

