/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Description:
 * Author: huawei
 * Create: 2023-08-21
 */

#include <linux/gfp.h>

#include "devmm_adapt.h"
#include "devmm_common.h"
#include "svm_mem_map.h"
#include "svm_vmma_mng.h"

/*
 * Initialize a vmma manager covering [va, va + size).
 * The range is split into DEVMM_VMMA_GRANULARITY_SIZE granules; mng->vmmas[i]
 * points at the vmma owning granule i, or NULL when the granule is free.
 * NOTE(review): assumes size is a multiple of DEVMM_VMMA_GRANULARITY_SIZE;
 * a remainder is silently truncated by the division below — confirm callers.
 * Returns 0 on success, -EINVAL for a range smaller than one granule,
 * -ENOMEM on allocation failure.
 */
int devmm_vmma_mng_init(struct devmm_vmma_mng *mng, u64 va, u64 size)
{
    u64 num = size / DEVMM_VMMA_GRANULARITY_SIZE;

    /* A zero-slot table would make every later vmmas[] access out of bounds. */
    if (num == 0) {
        devmm_drv_err("Invalid size. (size=%llu)\n", size);
        return -EINVAL;
    }

    /* kvcalloc checks num * element-size for overflow, unlike an open-coded multiply. */
    mng->vmmas = kvcalloc(num, sizeof(struct devmm_vmma_struct *), GFP_KERNEL | __GFP_ACCOUNT);
    if (mng->vmmas == NULL) {
        devmm_drv_err("Alloc failed. (num=%llu)\n", num);
        return -ENOMEM;
    }

    mng->va = va;
    mng->size = size;
    init_rwsem(&mng->rw_sem);
    return 0;
}

/* Release the granule lookup table; the manager struct itself is owned by the caller. */
void devmm_vmma_mng_uninit(struct devmm_vmma_mng *mng)
{
    struct devmm_vmma_struct **table = mng->vmmas;

    mng->vmmas = NULL;
    kvfree(table);
}

/* Take an extra reference on @vmma; paired with devmm_vmma_put(). */
static void _devmm_vmma_get(struct devmm_vmma_struct *vmma)
{
    kref_get(&vmma->ref);
}

/* kref release callback: frees the vmma once the last reference is dropped. */
static void devmm_vmma_release(struct kref *kref)
{
    struct devmm_vmma_struct *vmma = container_of(kref, struct devmm_vmma_struct, ref);

    kvfree(vmma);
}

/*
 * Look up the vmma covering @va and take a reference on it.
 * Returns NULL when @va lies outside the managed range or the granule is free.
 * A non-NULL result must be balanced with devmm_vmma_put().
 */
struct devmm_vmma_struct *devmm_vmma_get(struct devmm_vmma_mng *mng, u64 va)
{
    struct devmm_vmma_struct *found = NULL;
    bool in_range;

    down_read(&mng->rw_sem);
    in_range = (va >= mng->va) && (va < (mng->va + mng->size));
    if (in_range) {
        found = mng->vmmas[(va - mng->va) / DEVMM_VMMA_GRANULARITY_SIZE];
        if (found != NULL) {
            _devmm_vmma_get(found);
        }
    }
    up_read(&mng->rw_sem);

    return found;
}

/* Drop a reference on @vmma; devmm_vmma_release() frees it on the last put. */
void devmm_vmma_put(struct devmm_vmma_struct *vmma)
{
    kref_put(&vmma->ref, devmm_vmma_release);
}

/* Clear every granule slot owned by @vmma. Caller holds mng->rw_sem for write. */
static void _devmm_vmma_erase(struct devmm_vmma_mng *mng, struct devmm_vmma_struct *vmma)
{
    u64 slot = (vmma->info.va - mng->va) / DEVMM_VMMA_GRANULARITY_SIZE;
    u64 remain = vmma->info.size / DEVMM_VMMA_GRANULARITY_SIZE;

    while (remain != 0) {
        mng->vmmas[slot] = NULL;
        slot++;
        remain--;
    }
}

/* Remove @vmma from the lookup table under the manager's write lock. */
static void devmm_vmma_erase(struct devmm_vmma_mng *mng, struct devmm_vmma_struct *vmma)
{
    down_write(&mng->rw_sem);
    _devmm_vmma_erase(mng, vmma);
    up_write(&mng->rw_sem);
}

/*
 * Fill granule slots [id_start, id_start + num) with @vmma.
 * Caller holds mng->rw_sem for write. If a slot is already occupied,
 * every slot claimed so far is rolled back and -EINVAL is returned.
 */
static int _devmm_vmma_insert(struct devmm_vmma_mng *mng, struct devmm_vmma_struct *vmma, u64 id_start, u64 num)
{
    u64 end = id_start + num;
    u64 pos;

    for (pos = id_start; pos < end; pos++) {
        if (likely(mng->vmmas[pos] == NULL)) {
            mng->vmmas[pos] = vmma;
            continue;
        }

        devmm_drv_err("Repeat insert. (i=%llu; va=0x%llx)\n", pos, mng->vmmas[pos]->info.va);
        /* Roll back the slots already claimed in this call. */
        while (pos > id_start) {
            pos--;
            mng->vmmas[pos] = NULL;
        }
        return -EINVAL;
    }

    return 0;
}

/*
 * Validate that @vmma fits entirely inside the managed range, then publish
 * it into the granule table under the write lock.
 * Returns 0 on success, -EINVAL when out of range or on a slot collision.
 */
static int devmm_vmma_insert(struct devmm_vmma_mng *mng, struct devmm_vmma_struct *vmma)
{
    u64 start = vmma->info.va;
    u64 len = vmma->info.size;
    int ret = -EINVAL;

    down_write(&mng->rw_sem);
    /* len > (range end - start) is overflow-safe because start >= mng->va here. */
    if ((start < mng->va) || (len > (mng->va + mng->size - start))) {
        devmm_drv_err("Out of vmma mng range. (mng->va=0x%llx; mng->size=%llu; va=0x%llx; size=%llu)\n",
            mng->va, mng->size, start, len);
    } else {
        ret = _devmm_vmma_insert(mng, vmma, (start - mng->va) / DEVMM_VMMA_GRANULARITY_SIZE,
            len / DEVMM_VMMA_GRANULARITY_SIZE);
    }
    up_write(&mng->rw_sem);
    return ret;
}

/*
 * Allocate a vmma described by @info and publish it in the manager.
 * The initial kref is owned by the lookup table and is dropped by
 * devmm_vmma_destroy()/devmm_vmmas_destroy().
 * Returns 0 on success, -ENOMEM on allocation failure, -EINVAL on insert failure.
 */
int devmm_vmma_create(struct devmm_vmma_mng *mng, struct devmm_vmma_info *info)
{
    struct devmm_vmma_struct *vmma;
    int ret;

    vmma = kvzalloc(sizeof(*vmma), GFP_KERNEL | __GFP_ACCOUNT);
    if (vmma == NULL) {
        devmm_drv_err("Kvzalloc failed.\n");
        return -ENOMEM;
    }

    vmma->info = *info;
    kref_init(&vmma->ref);
    init_rwsem(&vmma->rw_sem);

    ret = devmm_vmma_insert(mng, vmma);
    if (ret != 0) {
        kvfree(vmma);
    }
    return ret;
}

/* Unpublish @vmma from the lookup table, then drop the table's reference on it. */
void devmm_vmma_destroy(struct devmm_vmma_mng *mng, struct devmm_vmma_struct *vmma)
{
    devmm_vmma_erase(mng, vmma);
    devmm_vmma_put(vmma);
}

/*
 * Tear down every vmma still registered in @mng. Device-side entries
 * (DEVMM_SIDE_TYPE) are unmapped before the reference is dropped.
 * devmm_try_cond_resched() keeps long teardown loops from hogging the CPU.
 */
void devmm_vmmas_destroy(struct devmm_svm_process *svm_proc, struct devmm_vmma_mng *mng)
{
    u64 slot_cnt = mng->size / DEVMM_VMMA_GRANULARITY_SIZE;
    u32 stamp = (u32)jiffies;
    u64 idx;

    down_write(&mng->rw_sem);
    for (idx = 0; idx < slot_cnt; idx++) {
        struct devmm_vmma_struct *cur = mng->vmmas[idx];

        if (cur == NULL) {
            continue;
        }

        /* Clears every slot of this vmma, so later iterations skip it. */
        _devmm_vmma_erase(mng, cur);
        if (cur->info.side == DEVMM_SIDE_TYPE) {
            devmm_mem_unmap(svm_proc, &cur->info);
        }
        devmm_vmma_put(cur);
        devmm_try_cond_resched(&stamp);
    }
    up_write(&mng->rw_sem);
}

/*
 * Try to take the vmma's write lock, excluding concurrent occupiers.
 * Returns 0 with the lock held (release with devmm_vmma_exclusive_clear()),
 * or -EBUSY while readers still occupy the range.
 */
int devmm_vmma_exclusive_set(struct devmm_vmma_struct *vmma)
{
    if (down_write_trylock(&vmma->rw_sem) != 0) {
        return 0;
    }

    devmm_drv_err("Addr is occupied, should release occupied before unmap. (va=0x%llx; size=%llu)\n",
        vmma->info.va, vmma->info.size);
    return -EBUSY;
}

/* Release the exclusive hold taken by devmm_vmma_exclusive_set(). */
void devmm_vmma_exclusive_clear(struct devmm_vmma_struct *vmma)
{
    up_write(&vmma->rw_sem);
}

/*
 * Take a shared (read) hold on the vmma so it cannot be unmapped while in use.
 * Returns 0 with the hold taken, or -EFAULT when an exclusive holder (unmap)
 * owns the lock.
 */
static int devmm_vmma_occupy_inc(struct devmm_vmma_struct *vmma)
{
    if (down_read_trylock(&vmma->rw_sem) != 0) {
        return 0;
    }

    devmm_drv_err("Addr is unmapping, shouldn't be concurrent to occupy. (va=0x%llx; size=%llu)\n",
        vmma->info.va, vmma->info.size);
    return -EFAULT;
}

/* Release the shared hold taken by devmm_vmma_occupy_inc(). */
static void devmm_vmma_occupy_dec(struct devmm_vmma_struct *vmma)
{
    up_read(&vmma->rw_sem);
}

/*
 * Drop the shared occupancy hold on every vmma covering [va, va + size).
 * Stops quietly at the first gap; also used to roll back a partial
 * devmm_vmmas_occupy_inc().
 */
void devmm_vmmas_occupy_dec(struct devmm_vmma_mng *mng, u64 va, u64 size)
{
    u64 done = 0;

    while (done < size) {
        struct devmm_vmma_struct *cur = devmm_vmma_get(mng, va + done);

        if (cur == NULL) {
            return;
        }

        done += cur->info.size;
        devmm_vmma_occupy_dec(cur);
        devmm_vmma_put(cur);
    }
}

/*
 * Take a shared occupancy hold on every vmma covering [va, va + size).
 * On any failure the holds acquired so far are rolled back via
 * devmm_vmmas_occupy_dec() and an error is returned: -EINVAL for a gap in
 * the range, -EFAULT for a concurrent unmap.
 */
int devmm_vmmas_occupy_inc(struct devmm_vmma_mng *mng, u64 va, u64 size)
{
    u64 done = 0;

    while (done < size) {
        struct devmm_vmma_struct *cur = devmm_vmma_get(mng, va + done);
        int err;

        if (cur == NULL) {
            devmm_vmmas_occupy_dec(mng, va, done);
            devmm_drv_err("Invalid addr range. (va=0x%llx; mng->va=0x%llx; mng->size=%llu)\n",
                va + done, mng->va, mng->size);
            return -EINVAL;
        }

        err = devmm_vmma_occupy_inc(cur);
        if (err != 0) {
            /* Drop our lookup ref first, then unwind only the prior holds. */
            devmm_vmma_put(cur);
            devmm_vmmas_occupy_dec(mng, va, done);
            return err;
        }

        done += cur->info.size;
        devmm_vmma_put(cur);
    }

    return 0;
}
