/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2019-2022. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Description:
 * Author: huawei
 * Create: 2019-10-15
 */
#include <linux/version.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/hashtable.h>
#include <linux/version.h>
#include <linux/mm.h>

#include "devmm_common.h"
#include "devmm_proc_info.h"
#include "svm_proc_mng.h"
#include "devmm_dev.h"
#include "svm_proc_fs.h"
#include "svm_mmu_notifier.h"

/*
 * Drop this process's mmu_notifier registration without triggering the
 * notifier's release path for resources that are torn down separately.
 * The storage model differs by kernel version (see devmm_mmu_notifier_register):
 * on >= 5.4 svm_proc->notifier is a pointer obtained from mmu_notifier_get();
 * on older kernels it is an embedded struct mmu_notifier.
 */
void devmm_mmu_notifier_unregister_no_release(struct devmm_svm_process *svm_proc)
{
#ifndef ADAPT_KP_OS_FOR_EMU_TEST
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0))
    /* >= 5.4: release our reference; freeing happens via ops->free_notifier. */
    if (svm_proc->notifier != NULL) {
        mmu_notifier_put(svm_proc->notifier);
    }
#else
    /* < 5.4: embedded notifier; the no-release variant skips ops->release. */
    mmu_notifier_unregister_no_release(&svm_proc->notifier, svm_proc->mm);
#endif
#endif
}

/*
 * Tear down the per-process SVM state when the owning mm goes away.
 * Removes the proc-fs entry, waits for in-flight work and unhooks the process
 * from the hash table, then (exactly once, guarded by notifier_reg_flag under
 * proc_lock) marks the process as exiting and unregisters the mmu notifier.
 * Safe to call from both the unmap path and the release path; the flag check
 * makes the second caller a no-op.
 */
void devmm_svm_mmu_notifier_unreg(struct devmm_svm_process *svm_proc)
{
    bool notifier_release_flag = false;

    devmm_drv_info("Devmm notifier unreg."
        "(hostpid=%d; devpid=%d; devid=%d; vfid=%d; status=0x%x; proc_idx=0x%x; msg=%u; other_proc=%u)\n",
        svm_proc->process_id.hostpid, svm_proc->devpid, svm_proc->process_id.devid, svm_proc->process_id.vfid,
        svm_proc->proc_status, svm_proc->proc_idx, svm_proc->msg_processing, svm_proc->other_proc_occupying);
    /*
    * with user unmap, trace: do_munmap->devmm_notifier_start->devmm_svm_mmu_notifier_unreg
    * without user unmap, trace: exit->devmm_notifier_release->devmm_svm_mmu_notifier_unreg
    */
    devmm_proc_fs_del_task(svm_proc);
    devmm_wait_exit_and_del_from_hashtable_lock(svm_proc);

    /* Claim the teardown under proc_lock so only one caller proceeds. */
    mutex_lock(&svm_proc->proc_lock);
    if (svm_proc->notifier_reg_flag == DEVMM_SVM_INITED_FLAG) {
        svm_proc->notifier_reg_flag = DEVMM_SVM_UNMAP_FLAG;
        svm_proc->tsk = NULL;
        svm_proc->proc_status = DEVMM_SVM_THREAD_EXITING;
        svm_proc->inited = DEVMM_SVM_UNMAP_FLAG;
        notifier_release_flag = true;
    }
    mutex_unlock(&svm_proc->proc_lock);

    if (notifier_release_flag == true) {
        devmm_mmu_notifier_unregister_no_release(svm_proc);
        devmm_notifier_release_private(svm_proc);
#if defined(__arm__) || defined(__aarch64__)
        /* Instruction barrier on ARM; presumably orders the teardown above
         * against later observers — TODO confirm the exact ordering need. */
        isb();
#endif
        svm_proc->notifier_reg_flag = DEVMM_SVM_UNINITED_FLAG;
    }
}

#ifndef ADAPT_KP_OS_FOR_EMU_TEST
/*
 * Return true iff [start, end) exactly covers one device's 16G dvpp heap,
 * i.e. it starts at that device's reserved base and spans precisely
 * DEVMM_MAX_HEAP_MEM_FOR_DVPP_16G bytes.
 */
STATIC inline bool devmm_mem_is_in_dvpp_vma_range(u64 start, u64 end)
{
    u64 dev;

    for (dev = 0; dev < DEVMM_MAX_DEVICE_NUM; dev++) {
        u64 base = DEVMM_SVM_MEM_START + dev * DEVMM_DVPP_HEAP_RESERVATION_SIZE;

        if ((start == base) && (end == base + DEVMM_MAX_HEAP_MEM_FOR_DVPP_16G)) {
            return true;
        }
    }

    return false;
}

/*
 * Return true iff [start, end) exactly matches one of the known SVM VMA
 * layouts: the whole SVM region, a single per-device dvpp heap, the combined
 * dvpp heap area, or the non-dvpp remainder of the SVM region.
 */
STATIC bool devmm_mem_is_in_vma_range(u64 start, u64 end)
{
    const u64 len = end - start;

    if (len == DEVMM_SVM_MEM_SIZE) {
        /* Entire SVM address range. */
        return (start == DEVMM_SVM_MEM_START) &&
               (end == DEVMM_SVM_MEM_START + DEVMM_SVM_MEM_SIZE);
    }

    if (len == DEVMM_MAX_HEAP_MEM_FOR_DVPP_16G) {
        /* One device's dvpp heap. */
        return devmm_mem_is_in_dvpp_vma_range(start, end);
    }

    if (len == DEVMM_DVPP_HEAP_TOTAL_SIZE) {
        /* All dvpp heaps together, at the front of the SVM range. */
        return (start == DEVMM_SVM_MEM_START) &&
               (end == DEVMM_SVM_MEM_START + DEVMM_DVPP_HEAP_TOTAL_SIZE);
    }

    if (len == DEVMM_SVM_MEM_SIZE - DEVMM_DVPP_HEAP_TOTAL_SIZE) {
        /* The non-dvpp tail of the SVM range. */
        return (start == DEVMM_SVM_MEM_START + DEVMM_DVPP_HEAP_TOTAL_SIZE) &&
               (end == DEVMM_SVM_MEM_START + DEVMM_SVM_MEM_SIZE);
    }

    return false;
}

/*
 * Version-independent core of the invalidate_range_start callback.
 * If the invalidated range is one of the known SVM VMA layouts, the user is
 * unmapping the SVM region, so release the whole process's resources.
 * Returns -EAGAIN when work is needed but the callback may not block.
 */
STATIC int _devmm_notifier_start(struct mmu_notifier *mn, struct mm_struct *mm,
    unsigned long start, unsigned long end, bool blockable)
{
    struct devmm_svm_process *svm_proc = devmm_get_svm_proc_by_mm(mm);

    if (svm_proc == NULL) {
        devmm_drv_info("Unlikely.\n");
        /*
         * If blockable argument is set to false then the callback cannot
         * sleep and has to return with -EAGAIN. 0 should be returned
         * otherwise
         */
        return blockable ? 0 : -EAGAIN;
    }

    if (!devmm_mem_is_in_vma_range(start, end)) {
        return 0;
    }

    if (!blockable) {
        return -EAGAIN;
    }

    devmm_drv_info("User unmap, need to release all resources.\n");
    devmm_svm_ioctl_lock(svm_proc, DEVMM_CMD_WLOCK);
    devmm_svm_mmu_notifier_unreg(svm_proc);
    devmm_svm_ioctl_unlock(svm_proc, DEVMM_CMD_WLOCK);

    return 0;
}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0))
/* >= 5.0: callback takes a mmu_notifier_range; unpack it for the core. */
STATIC int devmm_notifier_start(struct mmu_notifier *mn, const struct mmu_notifier_range *range)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 2, 0))
    /* 5.2 replaced the blockable field with an accessor over event flags. */
    bool blockable = mmu_notifier_range_blockable(range);
#else
    bool blockable = range->blockable;
#endif

    return _devmm_notifier_start(mn, range->mm, range->start, range->end, blockable);
}
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0)
/* 4.19 - 4.x: blockable passed explicitly, range as start/end pair. */
STATIC int devmm_notifier_start(struct mmu_notifier *mn, struct mm_struct *mm, unsigned long start, unsigned long end,
    bool blockable)
{
    return _devmm_notifier_start(mn, mm, start, end, blockable);
}
#else
/* < 4.19: void callback, always allowed to block. */
STATIC void devmm_notifier_start(struct mmu_notifier *mn, struct mm_struct *mm, unsigned long start, unsigned long end)
{
    (void)_devmm_notifier_start(mn, mm, start, end, true);
}
#endif

/*
 * ->release callback: the mm is exiting without a user unmap of the SVM
 * region, so look up the owning process and tear down its SVM state.
 */
STATIC void devmm_notifier_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
    struct devmm_svm_process *proc = devmm_get_svm_proc_by_mm(mm);

    if (proc != NULL) {
        devmm_svm_mmu_notifier_unreg(proc);
        return;
    }

    devmm_drv_err("Find svm_proc fail.\n");
}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0))
/*
 * ops->alloc_notifier callback for mmu_notifier_get() (>= 5.4): allocate a
 * zeroed mmu_notifier for this mm. Paired with devmm_free_notifier().
 * Returns ERR_PTR(-ENOMEM) on allocation failure, as the API requires.
 */
static struct mmu_notifier *devmm_alloc_notifier(struct mm_struct *mm)
{
    struct mmu_notifier *notifier;

    /*
     * kzalloc == kmalloc with __GFP_ZERO; the cast of the allocator's
     * return value and sizeof(struct ...) were redundant.
     * NOTE(review): GFP_ATOMIC looks stricter than needed here —
     * alloc_notifier is invoked from mmu_notifier_get(), confirm whether a
     * sleeping allocation is permitted before relaxing it.
     */
    notifier = kzalloc(sizeof(*notifier), GFP_ATOMIC | __GFP_NOWARN | __GFP_ACCOUNT);
    if (notifier == NULL) {
        devmm_drv_err("Kmalloc mmu_notifier fail.\n");
        return ERR_PTR(-ENOMEM);
    }

    return notifier;
}

/*
 * ops->free_notifier callback: release the notifier allocated by
 * devmm_alloc_notifier() once its last reference is dropped.
 */
static void devmm_free_notifier(struct mmu_notifier *mn)
{
    kfree(mn);
}
#endif

/*
 * MMU notifier callbacks for SVM processes: range invalidation detects a
 * user unmap of the SVM region; release handles mm exit. On >= 5.4 the
 * notifier object itself is allocated/freed via the get/put reference API.
 */
struct mmu_notifier_ops devmm_process_mmu_notifier = {
    .invalidate_range_start = devmm_notifier_start,
    .release = devmm_notifier_release,
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0))
    .alloc_notifier = devmm_alloc_notifier,
    .free_notifier = devmm_free_notifier,
#endif
};
#endif

/*
 * Register the SVM mmu notifier for svm_proc->mm.
 * On >= 5.4 kernels this takes a reference via mmu_notifier_get() (which
 * allocates through devmm_alloc_notifier); on older kernels it registers
 * the embedded notifier directly. Returns 0 on success, negative errno on
 * failure.
 *
 * NOTE(review): notifier_reg_flag is set to INITED before registration and
 * is not reset when registration fails — verify callers handle a non-zero
 * return by clearing or ignoring the flag.
 */
int devmm_mmu_notifier_register(struct devmm_svm_process *svm_proc)
{
    int ret = 0;
#ifndef ADAPT_KP_OS_FOR_EMU_TEST
    svm_proc->notifier_reg_flag = DEVMM_SVM_INITED_FLAG;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0))
    {
        struct mmu_notifier *mn = NULL;
        mn = mmu_notifier_get(&devmm_process_mmu_notifier, svm_proc->mm);
        if (IS_ERR(mn)) {
            ret = (int)PTR_ERR(mn);
            svm_proc->notifier = NULL;
        } else {
            svm_proc->notifier = mn;
        }
    }
#else
    /* < 5.4: embedded notifier; bind the ops table and register in place. */
    svm_proc->notifier.ops = &devmm_process_mmu_notifier;
    ret = mmu_notifier_register(&svm_proc->notifier, svm_proc->mm);
#endif
#endif
    return ret;
}