/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2019-2022. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Description:
 * Author: huawei
 * Create: 2019-10-15
 */
#include <linux/types.h>
#include <linux/version.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>

#include "devmm_common.h"
#include "svm_proc_mng.h"
#include "svm_page_cnt_stats.h"
#include "svm_proc_fs.h"

#ifdef CONFIG_PROC_FS
#define DEVMM_PROC_FS_NAME_LEN 32 /* buffer size for a per-task dir name (decimal pid) */
#define DEVMM_PROC_FS_MODE 0400 /* proc entries readable by owner (root) only */
/* /proc/svm/task directory; per-process subdirectories are created beneath it */
struct proc_dir_entry *devmm_task_entry = NULL;

/*
 * Format the proc directory name for a task: the pid in decimal.
 * On sprintf_s failure the caller-provided buffer is left untouched
 * (callers pass an all-zero buffer, so it stays an empty string).
 * NOTE(review): under DRV_UT builds the body is compiled out entirely,
 * so the name is always empty there — confirm that is intended.
 */
static void devmm_proc_fs_format_task_dir_name(pid_t pid, char *name, int len)
{
#ifndef DRV_UT
    if (sprintf_s(name, (unsigned long)len, "%d", pid) <= 0) { /* if fail just name is zero */
        devmm_drv_warn("Sprintf_s failed.\n");
    }
#endif
}

/*
 * Create the per-task directory "<pid>" under @parent.
 * Returns the new proc entry, or NULL on failure.
 */
static struct proc_dir_entry *devmm_proc_fs_mk_task_dir(pid_t pid, struct proc_dir_entry *parent)
{
    char dir_name[DEVMM_PROC_FS_NAME_LEN] = {0};

    devmm_proc_fs_format_task_dir_name(pid, dir_name, DEVMM_PROC_FS_NAME_LEN);

    return proc_mkdir(dir_name, parent);
}

/* Remove the per-task directory "<pid>" (and everything below it) from @parent. */
static void devmm_proc_fs_rm_task_dir(pid_t pid, struct proc_dir_entry *parent)
{
    char dir_name[DEVMM_PROC_FS_NAME_LEN] = {0};

    devmm_proc_fs_format_task_dir_name(pid, dir_name, DEVMM_PROC_FS_NAME_LEN);
    remove_proc_subtree(dir_name, parent);
}

/*
 * seq_file show callback for /proc/svm/task/<pid>/summary.
 * Emits one line with the per-process page-count statistics:
 * normal and huge pages, under both cgroup and CDM accounting.
 */
static int devmm_task_info_show(struct seq_file *seq, void *offset)
{
    struct devmm_svm_process *proc = (struct devmm_svm_process *)seq->private;
    u64 cgroup_pages = devmm_get_cgroup_used_page_cnt(&proc->pg_cnt_stats, DEVMM_NORMAL_PAGE_TYPE);
    u64 cgroup_hpages = devmm_get_cgroup_used_page_cnt(&proc->pg_cnt_stats, DEVMM_HUGE_PAGE_TYPE);
    u64 cdm_pages = devmm_get_cdm_used_page_cnt(&proc->pg_cnt_stats, DEVMM_NORMAL_PAGE_TYPE);
    u64 cdm_hpages = devmm_get_cdm_used_page_cnt(&proc->pg_cnt_stats, DEVMM_HUGE_PAGE_TYPE);

    seq_printf(seq, "Svm page cnt stats.(hostpid=%u; devpid=%u; devid=%u; vfid=%u; "
        "cgroup_used_page_cnt=%llu; cgroup_used_hpage_cnt=%llu; "
        "cdm_used_page_cnt=%llu; cdm_used_hpage_cnt=%llu)\n",
        proc->process_id.hostpid, proc->devpid,
        proc->process_id.devid, proc->process_id.vfid,
        cgroup_pages, cgroup_hpages,
        cdm_pages, cdm_hpages);

    return 0;
}

/*
 * open() hook for the per-task "summary" entry; binds the seq_file to the
 * devmm_svm_process stored as the proc entry's private data.
 * NOTE(review): pde_data() was introduced in v5.17 (formerly PDE_DATA()),
 * yet the file also supports kernels < 5.6 below — presumably a compat
 * macro exists in the project headers; confirm.
 */
STATIC int devmm_task_sum_open(struct inode *inode, struct file *file)
{
    return single_open(file, devmm_task_info_show, pde_data(inode));
}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0)
/* v5.6 replaced file_operations with proc_ops for /proc entries */
static const struct proc_ops devmm_task_sum_ops = {
    .proc_open    = devmm_task_sum_open,
    .proc_read    = seq_read,
    .proc_lseek   = seq_lseek,
    .proc_release = single_release,
};
#else
/* pre-5.6 kernels: classic file_operations with seq_file helpers */
static const struct file_operations devmm_task_sum_ops = {
    .owner = THIS_MODULE,
    .open    = devmm_task_sum_open,
    .read    = seq_read,
    .llseek  = seq_lseek,
    .release = single_release,
};
#endif

/*
 * Create /proc/svm/task/<hostpid>/summary for a new SVM process.
 * Skipped inside a docker container, when the task root is missing,
 * or when the entry already exists.
 */
void devmm_proc_fs_add_task(struct devmm_svm_process *svm_proc)
{
    struct proc_dir_entry *task_dir;

    if (devmm_thread_is_run_in_docker()) {
        return;
    }
    if ((devmm_task_entry == NULL) || (svm_proc->task_entry != NULL)) {
        return;
    }

    task_dir = devmm_proc_fs_mk_task_dir(svm_proc->process_id.hostpid, devmm_task_entry);
    svm_proc->task_entry = task_dir;
    if (task_dir != NULL) {
        proc_create_data("summary", DEVMM_PROC_FS_MODE, task_dir, &devmm_task_sum_ops, svm_proc);
    }
}

/*
 * Tear down /proc/svm/task/<hostpid> for an exiting SVM process.
 * No-op when the subtree was never created.
 */
void devmm_proc_fs_del_task(struct devmm_svm_process *svm_proc)
{
    if ((devmm_task_entry != NULL) && (svm_proc->task_entry != NULL)) {
        devmm_proc_fs_rm_task_dir(svm_proc->process_id.hostpid, devmm_task_entry);
        svm_proc->task_entry = NULL;
    }
}

/*
 * Dump TS share-memory statistics: one block per device that has memory
 * blocks allocated, followed by a per-VF table for VFs with cores assigned.
 * The VF table header is printed lazily, only once per device.
 */
static void devmm_info_show_ts_share_memory(struct seq_file *seq, struct devmm_svm_dev *svm_dev)
{
    u32 dev;

    seq_printf(seq, "Svm statistics ts share memory start\r\n");
    for (dev = 0; dev < DEVMM_MAX_DEVICE_NUM; dev++) {
        bool header_pending = true;
        u32 vf;

        if (svm_dev->pa_info[dev].total_block_num == 0) {
            continue; /* device has no share-memory blocks */
        }
        seq_printf(seq, "Ts share mem info.(devid=%d, total_block_num=%u, "
            "total_data_num=%u, free_index=%u, recycle_index=%u)\r\n",
            dev, svm_dev->pa_info[dev].total_block_num,
            svm_dev->pa_info[dev].total_data_num,
            svm_dev->pa_info[dev].free_index,
            svm_dev->pa_info[dev].recycle_index);

        for (vf = 0; vf < DEVMM_MAX_VF_NUM; vf++) {
            if (svm_dev->pa_info[dev].core_num[vf] == 0) {
                continue; /* VF unused on this device */
            }
            if (header_pending) {
                header_pending = false;
                seq_printf(seq, "Vf info:\r\n"
                    "vf_id       core_num   total_core_num       data_total        "
                    "data_free    convert_total     convert_free\r\n");
            }
            seq_printf(seq, "%03u %16u %16u %16u %16u %16llu %16llu\r\n",
                vf, svm_dev->pa_info[dev].core_num[vf],
                svm_dev->pa_info[dev].total_core_num[vf],
                svm_dev->pa_info[dev].vdev_total_data_num[vf],
                svm_dev->pa_info[dev].vdev_free_data_num[vf],
                svm_dev->pa_info[dev].vdev_total_convert_len[vf],
                svm_dev->pa_info[dev].vdev_free_convert_len[vf]);
        }
    }
    seq_printf(seq, "Svm statistics ts share memory end\r\n");
}

/*
 * seq_file show callback for /proc/svm/summary; the device context is
 * carried in the seq_file's private pointer.
 */
static int devmm_info_show(struct seq_file *seq, void *offset)
{
    devmm_info_show_ts_share_memory(seq, (struct devmm_svm_dev *)seq->private);
    return 0;
}

/*
 * open() hook for /proc/svm/summary; binds the seq_file to the
 * devmm_svm_dev stored as the proc entry's private data.
 */
STATIC int devmm_sum_open(struct inode *inode, struct file *file)
{
    return single_open(file, devmm_info_show, pde_data(inode));
}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0)
/* v5.6 replaced file_operations with proc_ops for /proc entries */
static const struct proc_ops devmm_sum_ops = {
    .proc_open    = devmm_sum_open,
    .proc_read    = seq_read,
    .proc_lseek   = seq_lseek,
    .proc_release = single_release,
};
#else
/* pre-5.6 kernels: classic file_operations with seq_file helpers */
static const struct file_operations devmm_sum_ops = {
    .owner = THIS_MODULE,
    .open    = devmm_sum_open,
    .read    = seq_read,
    .llseek  = seq_lseek,
    .release = single_release,
};
#endif

static struct proc_dir_entry *devmm_top_entry = NULL; /* /proc/svm root */

/*
 * Build the /proc/svm tree: svm/, svm/task/ and svm/summary.
 * On failure of the root mkdir the tree is simply absent; add_task
 * later degrades to a no-op because devmm_task_entry stays NULL.
 */
void devmm_proc_fs_init(struct devmm_svm_dev *svm_dev)
{
    devmm_top_entry = proc_mkdir("svm", NULL);
    if (devmm_top_entry == NULL) {
        return;
    }
    devmm_task_entry = proc_mkdir("task", devmm_top_entry);
    proc_create_data("summary", DEVMM_PROC_FS_MODE, devmm_top_entry, &devmm_sum_ops, svm_dev);
}

void devmm_proc_fs_uninit(void)
{
    if (devmm_top_entry != NULL) {
        remove_proc_subtree("svm", NULL);
    }
}
#else
/* CONFIG_PROC_FS disabled: proc fs support compiles to a no-op. */
void devmm_proc_fs_add_task(struct devmm_svm_process *svm_proc)
{
}

/* CONFIG_PROC_FS disabled: proc fs support compiles to a no-op. */
void devmm_proc_fs_del_task(struct devmm_svm_process *svm_proc)
{
}

/* CONFIG_PROC_FS disabled: proc fs support compiles to a no-op. */
void devmm_proc_fs_init(struct devmm_svm_dev *svm_dev)
{
}

/* CONFIG_PROC_FS disabled: proc fs support compiles to a no-op. */
void devmm_proc_fs_uninit(void)
{
}
#endif
