/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2022-2022. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Description:
 * Author: huawei
 * Create: 2022-11-29
 */
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/spinlock.h>

#include "svm_log.h"
#include "devmm_adapt.h"
#include "devmm_common.h"
#include "svm_srcu_work.h"

/* One queued unit of deferred work: a callback plus an owned copy of its argument. */
struct devmm_srcu_node {
    srcu_subwork_func work_func; /* callback invoked from the delayed work context */
    u64 *arg;                    /* kvzalloc'd private copy of the caller's argument buffer */
    u64 arg_size;                /* size of *arg in bytes */

    struct list_head node;       /* link into devmm_srcu_work.head; popped from the tail */

    u32 type;                    /* DEVMM_SRCU_SUBWORK_* policy; ENSURE_EXEC nodes still run at teardown */
};

/* Fallback queue used when devmm_srcu_subwork_add() is called with srcu_work == NULL. */
struct devmm_srcu_work default_srcu_work;

/*
 * Detach and return the most recently queued subwork node (list is pushed at
 * the head and popped from the tail here via list_last_entry), or NULL when
 * the queue is empty. The caller takes ownership of the returned node and
 * must kvfree() both the node and its argument buffer.
 */
static struct devmm_srcu_node *devmm_erase_one_srcu_node(struct devmm_srcu_work *srcu_work)
{
    struct devmm_srcu_node *picked = NULL;

    spin_lock_bh(&srcu_work->lock);
    if (!list_empty(&srcu_work->head)) {
        picked = list_last_entry(&srcu_work->head, struct devmm_srcu_node, node);
        list_del(&picked->node);
    }
    spin_unlock_bh(&srcu_work->lock);

    return picked;
}

/*
 * Delayed-work handler: pops one queued subwork node, runs its callback,
 * releases the node, then reschedules itself if more nodes remain.
 * Handling a single node per invocation keeps each work run short.
 */
static void devmm_srcu_base_work(struct work_struct *work)
{
    struct devmm_srcu_work *srcu_work = container_of(work, struct devmm_srcu_work, dwork.work);
    struct devmm_srcu_node *cur = devmm_erase_one_srcu_node(srcu_work);

    if (cur == NULL) {
        return;
    }

    cur->work_func(cur->arg, cur->arg_size);
    kvfree(cur->arg);
    kvfree(cur);

    /*
     * Unlocked emptiness check: delayed work allows at most one running plus
     * one pending instance, so we reschedule while entries remain. A missed
     * observation here is harmless because every add also schedules the work.
     */
    if (!list_empty(&srcu_work->head)) {
        (void)schedule_delayed_work(&srcu_work->dwork, msecs_to_jiffies(0));
    }
}

/* srcu_work == NULL will use default_srcu_work */
/*
 * Queue a subwork item for deferred execution via the delayed work.
 * A private copy of @arg (arg_size bytes) is made, so the caller's buffer
 * may be released immediately after this call returns.
 *
 * srcu_work == NULL selects default_srcu_work.
 * @type: DEVMM_SRCU_SUBWORK_* policy; ENSURE_EXEC nodes still run at uninit.
 *
 * Returns 0 on success, -EINVAL on invalid parameters, -ENOMEM on
 * allocation failure, or the memcpy_s error code.
 */
int devmm_srcu_subwork_add(struct devmm_srcu_work *srcu_work, u32 type, srcu_subwork_func func, u64 *arg, u64 arg_size)
{
    struct devmm_srcu_work *srcu_work_tmp = (srcu_work != NULL) ? srcu_work : &default_srcu_work;
    struct devmm_srcu_node *srcu_node = NULL;
    /*
     * NOTE(review): kvzalloc with GFP_ATOMIC skips the vmalloc fallback and
     * may warn on some kernel versions; confirm arg_size stays small on
     * softirq paths so the plain kmalloc path always suffices.
     */
    gfp_t flags = (in_softirq() != 0) ? (GFP_ATOMIC | __GFP_ACCOUNT) : (GFP_KERNEL | __GFP_ACCOUNT);
    int ret;

    /*
     * Reject invalid requests up front: a NULL callback would crash later in
     * the work handler where the failure is hard to attribute, and a
     * zero-size/NULL argument would make kvzalloc(0) return ZERO_SIZE_PTR
     * with nothing meaningful to copy.
     */
    if (func == NULL || arg == NULL || arg_size == 0) {
        return -EINVAL;
    }

    srcu_node = kvzalloc(sizeof(struct devmm_srcu_node), flags);
    if (srcu_node == NULL) {
        return -ENOMEM;
    }

    srcu_node->arg = kvzalloc(arg_size, flags);
    if (srcu_node->arg == NULL) {
        kvfree(srcu_node);
        return -ENOMEM;
    }

    ret = memcpy_s(srcu_node->arg, arg_size, arg, arg_size);
    if (ret != 0) {
        kvfree(srcu_node->arg);
        kvfree(srcu_node);
        return ret;
    }

    srcu_node->work_func = func;
    srcu_node->arg_size = arg_size;
    srcu_node->type = type;

    spin_lock_bh(&srcu_work_tmp->lock);
    list_add(&srcu_node->node, &srcu_work_tmp->head);
    spin_unlock_bh(&srcu_work_tmp->lock);

    /* at most one running + one pending work instance; extra calls are no-ops */
    (void)schedule_delayed_work(&srcu_work_tmp->dwork, msecs_to_jiffies(0));

    return 0;
}

/*
 * Initialize a subwork queue: an empty node list, the spinlock guarding it,
 * and the delayed work that drains queued nodes via devmm_srcu_base_work().
 */
void devmm_srcu_work_init(struct devmm_srcu_work *srcu_work)
{
    INIT_LIST_HEAD(&srcu_work->head);
    spin_lock_init(&srcu_work->lock);
    INIT_DELAYED_WORK(&srcu_work->dwork, devmm_srcu_base_work);
}

/*
 * Drain every remaining queued node. Nodes marked ENSURE_EXEC still get
 * their callback invoked; all others are simply discarded. Periodically
 * yields the CPU via devmm_try_cond_resched() while draining.
 */
static void devmm_srcu_nodes_destroy(struct devmm_srcu_work *srcu_work)
{
    u32 stamp = (u32)jiffies;
    struct devmm_srcu_node *cur;

    for (cur = devmm_erase_one_srcu_node(srcu_work); cur != NULL;
         cur = devmm_erase_one_srcu_node(srcu_work)) {
        if (cur->type == DEVMM_SRCU_SUBWORK_ENSURE_EXEC_TYPE) {
            cur->work_func(cur->arg, cur->arg_size);
        }

        kvfree(cur->arg);
        kvfree(cur);

        devmm_try_cond_resched(&stamp);
    }
}

void devmm_srcu_work_uninit(struct devmm_srcu_work *srcu_work)
{
    if (cancel_delayed_work_sync(&srcu_work->dwork) == true) {
        devmm_drv_debug("Cancel delayed_work return true.\n");
    }
    devmm_srcu_nodes_destroy(srcu_work);
}

/* Initialize the module-wide fallback queue used when callers pass srcu_work == NULL. */
void devmm_default_srcu_work_init(void)
{
    devmm_srcu_work_init(&default_srcu_work);
}

