/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2022-2022. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Description:
 * Author: huawei
 * Create: 2022-11-29
 */
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include "svm_msg_client.h"
#include "svm_shmem_interprocess.h"
#include "svm_task_dev_res_mng.h"
#include "devmm_channel.h"
#include "svm_dev_res_mng.h"

static DEFINE_SPINLOCK(mng_spinlock);
static struct devmm_dev_res_mng *dev_res_mng[SVM_DEV_INST_MAX_NUM];

/*
 * Initialize a per-task device-resource tracker: empty list plus the
 * read/write semaphore that guards it.
 */
void devmm_init_task_dev_res_info(struct devmm_task_dev_res_info *info)
{
    INIT_LIST_HEAD(&info->head);
    init_rwsem(&info->rw_sem);
}

/*
 * Initialize the IPC memory-node lookup state: empty hash table plus the
 * rwlock that protects concurrent lookup/insert on it.
 */
static void devmm_init_ipc_mem_node_info(struct devmm_ipc_mem_node_info *info)
{
    hash_init(info->node_htable);
    rwlock_init(&info->rwlock);
}

/*
 * Initialize the device message client embedded in a devmm_dev_res_mng.
 * Only the physical function (vfid == 0) of a real device (not the host
 * agent) with a valid struct device gets a message client; all other
 * instances succeed without doing anything.
 *
 * Returns 0 on success or when no init is needed; otherwise the error
 * from devmm_dev_res_init().
 */
static int devmm_init_dev_msg_client(struct devmm_dev_msg_client *msg_client, struct device *dev)
{
    struct devmm_dev_res_mng *mng = container_of(msg_client, struct devmm_dev_res_mng, dev_msg_client);

    if ((mng->id_inst.vfid == 0) && (mng->id_inst.devid != SVM_HOST_AGENT_ID) && (dev != NULL)) {
        devmm_drv_info("Device message client will init. (devid=%u)\n", mng->id_inst.devid);
        msg_client->dev = dev;
        return devmm_dev_res_init(mng);
    }

    return 0;
}

/*
 * Tear down the device message client set up by devmm_init_dev_msg_client().
 * Mirrors the init-side guard: only the physical function (vfid == 0) of a
 * real device with an attached struct device has anything to release.
 * Frees the non-transparent message queue (if any) and clears the client's
 * back-pointers so a repeated call is a no-op.
 */
static void devmm_uninit_dev_msg_client(struct devmm_dev_msg_client *msg_client)
{
    struct devmm_dev_res_mng *mng = container_of(msg_client, struct devmm_dev_res_mng, dev_msg_client);

    if ((mng->id_inst.vfid != 0) || (mng->id_inst.devid == SVM_HOST_AGENT_ID) || (msg_client->dev == NULL)) {
        return;
    }

    /* devid is a u32: use %u, and the "devid=" key used by every other log here (was "did=%d"). */
    devmm_drv_info("Device message client will uninit. (devid=%u)\n", mng->id_inst.devid);
    devmm_host_dev_uninit(mng->id_inst.devid);
    if (msg_client->msg_chan != NULL) {
        /* Detach our private data from the channel before freeing its queue. */
        devdrv_set_msg_chan_priv(msg_client->msg_chan, NULL);
        devdrv_pcimsg_free_non_trans_queue(msg_client->msg_chan);
    }

    msg_client->dev = NULL;
    msg_client->msg_chan = NULL;
}

/*
 * Initialize all resources of a freshly allocated devmm_dev_res_mng:
 * record the identity, set the refcount to 1, and set up the IPC node
 * table, the task resource list, and (for eligible instances) the device
 * message client. Only the last step can fail; its error is returned.
 */
static int devmm_dev_res_mng_res_init(struct devmm_dev_res_mng *mng,
    struct svm_id_inst *id_inst, struct device *dev)
{
    mng->id_inst = *id_inst;
    kref_init(&mng->ref);

    devmm_init_ipc_mem_node_info(&mng->ipc_mem_node_info);
    devmm_init_task_dev_res_info(&mng->task_dev_res_info);
    return devmm_init_dev_msg_client(&mng->dev_msg_client, dev);
}

/*
 * Release the manager's private resources (currently only the device
 * message client). Called from the destroy path, before the final
 * kref_put; safe without holding a reference because private resources
 * are not shared with other reference holders.
 */
static void devmm_dev_res_mng_priv_res_uninit(struct devmm_dev_res_mng *mng)
{
    devmm_uninit_dev_msg_client(&mng->dev_msg_client);
}

/*
 * Release the manager's public (shared) resources. Only the physical
 * function (vfid == 0) of a real device owns an address-conversion
 * manager; every other instance has nothing to release. Runs from the
 * kref release callback, i.e. when the last reference is dropped.
 */
static void devmm_dev_res_mng_pub_res_uninit(struct devmm_dev_res_mng *mng)
{
    if ((mng->id_inst.vfid == 0) && (mng->id_inst.devid != SVM_HOST_AGENT_ID)) {
        devmm_uninit_convert_addr_mng(mng->id_inst.devid);
    }
}

/*
 * Class's resources can be classified into public and private,
 * the public resource should be protected and released when the ref is 0.
 */
/*
 * Class's resources can be classified into public and private,
 * the public resource should be protected and released when the ref is 0.
 *
 * Full teardown (both private and public resources) for the create-path
 * error case, where no other reference can exist yet.
 */
static void devmm_dev_res_mng_res_uninit(struct devmm_dev_res_mng *mng)
{
    devmm_dev_res_mng_priv_res_uninit(mng);
    devmm_dev_res_mng_pub_res_uninit(mng);
}

static int devmm_dev_res_mng_insert(struct devmm_dev_res_mng *mng, struct svm_id_inst *id_inst)
{
    u32 dev_inst_id = svm_id_inst_to_dev_inst(id_inst);

    spin_lock_bh(&mng_spinlock);
    if (dev_res_mng[dev_inst_id] != NULL) {
        spin_unlock_bh(&mng_spinlock);
        devmm_drv_err("Already exist. (devid=%u; vfid=%u)\n", id_inst->devid, id_inst->vfid);
        return -EEXIST;
    }

    dev_res_mng[dev_inst_id] = mng;
    spin_unlock_bh(&mng_spinlock);
    return 0;
}

static struct devmm_dev_res_mng *devmm_dev_res_mng_erase(struct svm_id_inst *id_inst)
{
    u32 dev_inst_id = svm_id_inst_to_dev_inst(id_inst);
    struct devmm_dev_res_mng *mng = NULL;

    spin_lock_bh(&mng_spinlock);
    mng = dev_res_mng[dev_inst_id];
    if (mng == NULL) {
        spin_unlock_bh(&mng_spinlock);
        devmm_drv_err("Already erase. (devid=%u; vfid=%u)\n", id_inst->devid, id_inst->vfid);
        return NULL;
    }

    dev_res_mng[dev_inst_id] = NULL;
    spin_unlock_bh(&mng_spinlock);
    return mng;
}

/*
 * Allocate, initialize, and publish the device-resource manager for the
 * given (devid, vfid) identity.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -EEXIST if an
 * instance is already registered, or the error from resource init.
 * On any failure everything done so far is rolled back.
 */
int devmm_dev_res_mng_create(struct svm_id_inst *id_inst, struct device *dev)
{
    struct devmm_dev_res_mng *mng = NULL;
    int ret;

    /* No cast needed on the allocator return; sizeof *mng tracks the variable's type. */
    mng = devmm_kvzalloc(sizeof(*mng));
    if (mng == NULL) {
        /* Fixed log text: this is an allocation failure, not a duplicate create
         * (old message was "Repeate create.", which was both a typo and wrong). */
        devmm_drv_err("Alloc dev res mng failed. (devid=%u; vfid=%u)\n", id_inst->devid, id_inst->vfid);
        return -ENOMEM;
    }

    ret = devmm_dev_res_mng_res_init(mng, id_inst, dev);
    if (ret != 0) {
        devmm_kvfree(mng);
        return ret;
    }

    ret = devmm_dev_res_mng_insert(mng, id_inst);
    if (ret != 0) {
        /* Not yet published, so no other reference can exist: full teardown is safe. */
        devmm_dev_res_mng_res_uninit(mng);
        devmm_kvfree(mng);
        return ret;
    }

    devmm_drv_info("Dev res mng create success. (devid=%u; vfid=%u)\n", id_inst->devid, id_inst->vfid);
    return 0;
}

/*
 * kref release callback: runs when the last reference is dropped.
 * Only public resources are released here; private resources were
 * already torn down in devmm_dev_res_mng_destroy() before the final
 * kref_put.
 */
static void devmm_dev_res_mng_release(struct kref *kref)
{
    struct devmm_dev_res_mng *mng = container_of(kref, struct devmm_dev_res_mng, ref);

    devmm_dev_res_mng_pub_res_uninit(mng);
    devmm_kvfree(mng);
}

/*
 * Unregister and drop the manager for the given identity. Erasing the
 * table entry prevents new devmm_dev_res_mng_get() lookups; private
 * resources are released immediately, while public resources and the
 * memory itself go away in the release callback once every outstanding
 * reference (from concurrent get() holders) has been put.
 */
void devmm_dev_res_mng_destroy(struct svm_id_inst *id_inst)
{
    struct devmm_dev_res_mng *mng = NULL;

    mng = devmm_dev_res_mng_erase(id_inst);
    if (mng == NULL) {
        return;
    }

    devmm_dev_res_mng_priv_res_uninit(mng);
    /* Drops the initial reference taken by kref_init() at create time. */
    kref_put(&mng->ref, devmm_dev_res_mng_release);
    devmm_drv_info("Dev res mng destroy. (devid=%u; vfid=%u)\n", id_inst->devid, id_inst->vfid);
}

struct devmm_dev_res_mng *devmm_dev_res_mng_get(struct svm_id_inst *id_inst)
{
    u32 dev_inst_id = svm_id_inst_to_dev_inst(id_inst);
    struct devmm_dev_res_mng *mng = NULL;

    spin_lock_bh(&mng_spinlock);
    mng = dev_res_mng[dev_inst_id];
    if (mng != NULL) {
        kref_get(&mng->ref);
    }
    spin_unlock_bh(&mng_spinlock);

    return mng;
}

/*
 * Drop a reference taken by devmm_dev_res_mng_get(); the last put frees
 * the manager via devmm_dev_res_mng_release().
 */
void devmm_dev_res_mng_put(struct devmm_dev_res_mng *res_mng)
{
    kref_put(&res_mng->ref, devmm_dev_res_mng_release);
}

/*
 * Destroy every registered manager across all (devid, vfid) instances.
 * For each slot: get() pins the manager so it stays alive through
 * destroy(), destroy() unregisters it and drops the create-time
 * reference, and put() drops our pin — the last of the two puts frees
 * it. devmm_try_cond_resched() yields periodically (using the saved
 * jiffies stamp) so the full sweep doesn't hog the CPU.
 */
void devmm_dev_res_mng_destroy_all(void)
{
    u32 devid, vfid;
    u32 stamp = (u32)jiffies;

    for (devid = 0; devid < SVM_MAX_AGENT_NUM; devid++) {
        for (vfid = 0; vfid < DEVMM_MAX_VF_NUM; vfid++) {
            struct devmm_dev_res_mng *mng = NULL;
            struct svm_id_inst inst;

            svm_id_inst_pack(&inst, devid, vfid);
            mng = devmm_dev_res_mng_get(&inst);
            if (mng != NULL) {
                devmm_dev_res_mng_destroy(&inst);
                devmm_dev_res_mng_put(mng);
            }
            devmm_try_cond_resched(&stamp);
        }
    }
}

