/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Description:
 * Author: huawei
 * Create: 2023-07-25
 */

#include <linux/memcontrol.h>
#include <linux/gfp.h>
#include <linux/page-flags.h>

#include "devmm_adapt.h"
#include "svm_define.h"
#include "svm_mem_split.h"
#include "svm_cgroup_mng.h"
#include "svm_proc_gfp.h"

#ifndef EMU_ST
#define DEVMM_ALLOC_CONT_PAGES_SHIFT_THRES  16      /* 64K is (2 ^ 16 ) */
#else
#define DEVMM_ALLOC_CONT_PAGES_SHIFT_THRES  12
#endif

#define DEVMM_ALLOC_CONT_PAGES_MAX_NUM      512ULL  /* 2M is 512 * 4K */

/*
 * Drop one reference on @pg via @dec_func, zeroing the page contents
 * first when this caller appears to hold the last reference.
 *
 * @pg:        page (head page for huge pages) to release
 * @page_size: number of bytes to scrub (PAGE_SIZE or HPAGE_SIZE)
 * @dec_func:  release callback (__free_page wrapper or put_page)
 *
 * The refcount is sampled under the page lock, but the scrub and the
 * final @dec_func call happen after unlock_page(), so the "last
 * reference" decision is not atomic with the release.
 * NOTE(review): assumes no other path can take a new reference between
 * the check and the final release -- confirm with callers.
 */
static void devmm_page_ref_dec(struct page *pg, u64 page_size, void (*dec_func)(struct page *pg))
{
    int ref;

    lock_page(pg);
    ref = page_count(pg);
    if (ref > 1) {
        /* Other holders remain: just drop our reference. */
        dec_func(pg);
        unlock_page(pg);
    } else {
        unlock_page(pg);
        /* Clear user data for security. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0)
        /* PagePoisoned() only exists on >= 4.17; skip scrub of poisoned pages. */
        if (PagePoisoned(pg) == 0) {
            (void)memset_s(page_address(pg), page_size, 0, page_size);
        }
#else
        (void)memset_s(page_address(pg), page_size, 0, page_size);
#endif
        dec_func(pg);
    }
}

/* devmm_page_ref_dec() callback: return one order-0 page to the buddy allocator. */
static void free_normal_page(struct page *pg)
{
    __free_page(pg);
}

/* Drop one reference on a normal (PAGE_SIZE) page, scrubbing it when last. */
static void _devmm_free_normal_page(struct page *pg)
{
    devmm_page_ref_dec(pg, PAGE_SIZE, free_normal_page);
}

/*
 * Free one normal page and return its quota to the per-node
 * free-memory accounting of this device/VF.
 */
static void devmm_free_normal_page(struct devmm_phy_addr_attr *attr, struct page *pg)
{
    /* Capture the node id before the page may actually be released. */
    int node = page_to_nid(pg);

    _devmm_free_normal_page(pg);
    devmm_normal_free_mem_size_add(attr->devid, attr->vfid, node, 1);
}

/* Free the first @pg_num entries of @pages, updating accounting per page. */
static void devmm_free_normal_pages(struct devmm_phy_addr_attr *attr, struct page **pages, u64 pg_num)
{
    u64 idx = 0;

    while (idx < pg_num) {
        devmm_free_normal_page(attr, pages[idx]);
        idx++;
    }
}

/* Drop one reference on a huge (HPAGE_SIZE) page, scrubbing it when last. */
static void _devmm_free_huge_page(struct page *hpage)
{
    /* Keep the status quo, call put_page. */
    devmm_page_ref_dec(hpage, HPAGE_SIZE, put_page);
}

/*
 * Free one huge page and return its quota to the per-node huge-page
 * accounting, preserving the allocation-source flag it was charged with.
 */
static void devmm_free_huge_page(struct devmm_phy_addr_attr *attr, struct page *hpage)
{
    /* Read node and alloc flag before the page may actually be released. */
    int node = page_to_nid(hpage);
    u32 alloc_flag = devmm_get_hugetlb_alloc_flag(hpage);

    _devmm_free_huge_page(hpage);
    devmm_huge_free_mem_size_add(attr->devid, attr->vfid, node, 1, alloc_flag);
}

/* Free the first @pg_num entries of @pages as huge pages. */
static void devmm_free_huge_pages(struct devmm_phy_addr_attr *attr, struct page **pages, u64 pg_num)
{
    u64 idx = 0;

    while (idx < pg_num) {
        devmm_free_huge_page(attr, pages[idx]);
        idx++;
    }
}

/*
 * Free @pg_num pages previously obtained through devmm_alloc_pages(),
 * dispatching on the page type recorded in @attr.
 */
void devmm_free_pages(struct devmm_phy_addr_attr *attr, struct page **pages, u64 pg_num)
{
    if (attr->pg_type != DEVMM_HUGE_PAGE_TYPE) {
        devmm_free_normal_pages(attr, pages, pg_num);
        return;
    }

    devmm_free_huge_pages(attr, pages, pg_num);
}

/*
 * Split a 2^@order allocation into individual order-0 pages, hand the
 * first @pg_num of them to the caller via @pages, and release the
 * surplus tail pages (2^order - pg_num) back to the buddy allocator.
 *
 * For @order == 0 the allocation is already a single page, so no split
 * is performed.  devmm_split_page_memcg() presumably distributes the
 * memcg charge across the split pages -- defined elsewhere; confirm.
 */
static void devmm_get_sub_pages_from_compound_page(struct page *compound_page,
    u32 order, struct page **pages, u64 pg_num)
{
    struct page *pg = NULL;
    u64 alloced_page_num = 1ull << order;
    u64 i;

    if (order != 0) {
        split_page(compound_page, order);
        devmm_split_page_memcg(compound_page, alloced_page_num);
    }
    /* First pg_num pages go to the caller... */
    for (i = 0, pg = compound_page; i < pg_num; pg++, i++) {
        pages[i] = pg;
    }
    /* ...the remainder of the 2^order block is freed immediately. */
    for (; i < alloced_page_num; pg++, i++) {
        __free_page(pg);
    }
}

/*
 * Allocate @pg_num physically contiguous order-0 pages from node @nid.
 *
 * Quota is reserved from the per-node free-memory accounting before the
 * buddy allocation; it is rolled back if the allocation fails.  The
 * buddy request is rounded up to 2^order pages, and the surplus is
 * freed right away by devmm_get_sub_pages_from_compound_page().
 *
 * Returns 0 on success, a negative errno on failure.
 */
static int _devmm_alloc_pages_node(struct devmm_phy_addr_attr *attr,
    int nid, struct page **pages, u64 pg_num)
{
    struct page *compound_page = NULL;
    u32 order = (u32)get_order(pg_num << PAGE_SHIFT);
    u32 flag = devmm_get_alloc_mask();
    int ret;

    ret = devmm_normal_free_mem_size_sub(attr->devid, attr->vfid, nid, pg_num);
    if (ret != 0) {
        devmm_drv_info("Not enough normal free mem. (devid=%u; vfid=%u; nid=%d; pg_num=%llu)\n",
            attr->devid, attr->vfid, nid, pg_num);
        return ret;
    }

    compound_page = alloc_pages_node(nid, flag, order);
    if (compound_page == NULL) {
        /* Roll back the quota reserved above. */
        devmm_normal_free_mem_size_add(attr->devid, attr->vfid, nid, pg_num);
        return -ENOMEM;
    }

    devmm_get_sub_pages_from_compound_page(compound_page, order, pages, pg_num);
    return 0;
}

/*
 * Allocate one normal page, preferring *latest_nid and falling back to
 * the remaining nodes in @nids.  When a fallback node succeeds,
 * *latest_nid is updated so later allocations try that node first.
 *
 * Fix: the loop index was a signed int compared against the unsigned
 * @nid_num (-Wsign-compare); use u32 to match, consistent with the u64
 * counters used elsewhere in this file.
 *
 * Returns the allocated page, or NULL when every node is exhausted.
 */
static struct page *devmm_alloc_pages_node(struct devmm_phy_addr_attr *attr,
    int *latest_nid, int nids[], u32 nid_num)
{
    struct page *pg = NULL;
    u32 i;
    int ret;

    ret = _devmm_alloc_pages_node(attr, *latest_nid, &pg, 1);
    if (ret == 0) {
        return pg;
    }

    for (i = 0; i < nid_num; i++) {
        /* Skip the node that already failed above. */
        if (nids[i] == *latest_nid) {
            continue;
        }
        ret = _devmm_alloc_pages_node(attr, nids[i], &pg, 1);
        if (ret == 0) {
            *latest_nid = nids[i];
            return pg;
        }
    }

    return NULL;
}

/*
 * Fill @pages with @pg_num individually allocated normal pages.
 * On any failure, every page obtained so far is freed and -ENOMEM is
 * returned; on success the array holds exactly @pg_num pages.
 */
static int _devmm_alloc_normal_pages(struct devmm_phy_addr_attr *attr,
    int nids[], u32 nid_num, struct page **pages, u64 pg_num)
{
    /* Start from the first node; devmm_alloc_pages_node updates this. */
    int cur_nid = nids[0];
    u64 done;

    for (done = 0; done < pg_num; done++) {
        pages[done] = devmm_alloc_pages_node(attr, &cur_nid, nids, nid_num);
        if (pages[done] == NULL) {
            devmm_free_normal_pages(attr, pages, done);
            return -ENOMEM;
        }
    }

    return 0;
}

/*
 * Decide at compile time whether continuous allocation is worth trying:
 * only when the base page is smaller than 64K (the threshold above).
 * At runtime the decision is delegated to devmm_can_get_continuity_page().
 */
static bool devmm_is_necessary_to_alloc_continuous_pages(void)
{
/* If page_size >= 64k, the performance of dma_copy is not greatly improved.(Doubtful, keep the status quo first) */
#if (PAGE_SHIFT < DEVMM_ALLOC_CONT_PAGES_SHIFT_THRES)
    return devmm_can_get_continuity_page();
#else
    return false;
#endif
}

/*
 * Allocate @pg_num physically contiguous normal pages, trying each node
 * in @nids in order until one succeeds.
 *
 * Fix: the loop index was a signed int compared against the unsigned
 * @nid_num (-Wsign-compare); use u32 to match the parameter type.
 *
 * Returns 0 on success, -ENOMEM when no node can satisfy the request.
 */
static int devmm_alloc_continuous_pages(struct devmm_phy_addr_attr *attr,
    int nids[], u32 nid_num, struct page **pages, u64 pg_num)
{
    u32 i;
    int ret;

    for (i = 0; i < nid_num; i++) {
        ret = _devmm_alloc_pages_node(attr, nids[i], pages, pg_num);
        if (ret == 0) {
            return 0;
        }
    }

    return -ENOMEM;
}

/* Alloc continuous pages every 2MB, and return got_num. */
/*
 * Allocate pages in physically contiguous chunks of up to 2MB
 * (512 x 4K) each and return how many pages were actually obtained.
 * Chunks already allocated are kept even when a later chunk fails,
 * so the return value may be anywhere in [0, pg_num].
 */
static u64 _devmm_try_alloc_continuous_pages(struct devmm_phy_addr_attr *attr,
    int nids[], u32 nid_num, struct page **pages, u64 pg_num)
{
    u64 done = 0;

    while (done < pg_num) {
        u64 chunk = min(DEVMM_ALLOC_CONT_PAGES_MAX_NUM, (pg_num - done));

        if (devmm_alloc_continuous_pages(attr, nids, nid_num, &pages[done], chunk) != 0) {
            break;
        }
        done += chunk;
    }

    return done;
}

/* The returned pages is not necessarily continuous, but is as continuous as possible. */
/*
 * Best-effort allocation: grab 2MB-continuous chunks first (when the
 * platform benefits from it), then fill any remainder with discrete
 * pages.  The result is as physically continuous as possible but is
 * not guaranteed continuous.  On failure, every page obtained so far
 * is freed before returning the error.
 */
static int devmm_try_alloc_continuous_pages(struct devmm_phy_addr_attr *attr,
    int nids[], u32 nid_num, struct page **pages, u64 pg_num)
{
    u64 done = 0;
    int rc;

    if (devmm_is_necessary_to_alloc_continuous_pages()) {
        done = _devmm_try_alloc_continuous_pages(attr, nids, nid_num, pages, pg_num);
        if (done == pg_num) {
            return 0;
        }
    }

    /* Top up the shortfall with ordinary single-page allocations. */
    rc = _devmm_alloc_normal_pages(attr, nids, nid_num, &pages[done], pg_num - done);
    if (rc != 0) {
        devmm_free_normal_pages(attr, pages, done);
    }

    return rc;
}

/*
 * Allocate @pg_num normal pages.  A strictly-continuous request must
 * succeed as one contiguous run; otherwise continuity is best-effort.
 */
static int devmm_alloc_normal_pages(struct devmm_phy_addr_attr *attr,
    int nids[], u32 nid_num, struct page **pages, u64 pg_num)
{
    return attr->is_continuous ?
        devmm_alloc_continuous_pages(attr, nids, nid_num, pages, pg_num) :
        devmm_try_alloc_continuous_pages(attr, nids, nid_num, pages, pg_num);
}

/*
 * Reserve huge-page quota on @nid, then allocate one huge page with the
 * given hugetlb @flag.  The quota is rolled back if the allocation
 * itself fails.  Returns the page, or NULL.
 */
static struct page *devmm_alloc_hpage(struct devmm_phy_addr_attr *attr, int nid, u32 flag)
{
    struct page *hpage = NULL;

    if (devmm_huge_free_mem_size_sub(attr->devid, attr->vfid, nid, 1, flag) != 0) {
        devmm_drv_info("Not enough huge free mem. (devid=%u; vfid=%u; nid=%d; flag=%u)\n",
            attr->devid, attr->vfid, nid, flag);
        return NULL;
    }

    hpage = _devmm_alloc_hpage(nid, flag);
    if (hpage == NULL) {
        /* Roll back the quota reserved above. */
        devmm_huge_free_mem_size_add(attr->devid, attr->vfid, nid, 1, flag);
    }

    return hpage;
}

/*
 * Allocate one huge page with @flag, preferring *latest_nid and falling
 * back to the remaining nodes in @nids.  When a fallback node succeeds,
 * *latest_nid is updated so later allocations try that node first.
 *
 * Fix: the loop index was a signed int compared against the unsigned
 * @nid_num (-Wsign-compare); use u32 to match the parameter type.
 *
 * Returns the allocated huge page, or NULL when every node fails.
 */
static struct page *_devmm_alloc_huge_page(struct devmm_phy_addr_attr *attr,
    int *latest_nid, int nids[], u32 nid_num, u32 flag)
{
    struct page *hpage = NULL;
    u32 i;

    hpage = devmm_alloc_hpage(attr, *latest_nid, flag);
    if (hpage != NULL) {
        return hpage;
    }

    for (i = 0; i < nid_num; i++) {
        /* Skip the node that already failed above. */
        if (nids[i] == *latest_nid) {
            continue;
        }

        hpage = devmm_alloc_hpage(attr, nids[i], flag);
        if (hpage != NULL) {
            *latest_nid = nids[i];
            return hpage;
        }
    }

    return NULL;
}

/*
 * Allocate one huge page, escalating through progressively more
 * expensive sources: the hugetlb pool first, then the buddy allocator
 * without reclaim, and finally the buddy allocator with reclaim
 * permitted.  The EMU_ST build uses only the plain pool path.
 */
static struct page *devmm_alloc_huge_page(struct devmm_phy_addr_attr *attr, int *latest_nid, int nids[], u32 nid_num)
{
#ifndef EMU_ST
    struct page *hpage = NULL;

    hpage = _devmm_alloc_huge_page(attr, latest_nid, nids, nid_num, HUGETLB_ALLOC_NORMAL);
    if (hpage != NULL) {
        return hpage;
    }

    hpage = _devmm_alloc_huge_page(attr, latest_nid, nids, nid_num, HUGETLB_ALLOC_BUDDY | HUGETLB_ALLOC_NORECLAIM);
    if (hpage != NULL) {
        return hpage;
    }

    /* If alloc failed, allocing by buddy will reclaim mem which will takes a lot of time. */
    return _devmm_alloc_huge_page(attr, latest_nid, nids, nid_num, HUGETLB_ALLOC_BUDDY);
#else
    return _devmm_alloc_huge_page(attr, latest_nid, nids, nid_num, HUGETLB_ALLOC_NORMAL);
#endif
}

/*
 * Fill @hpages with @pg_num huge pages.  On any failure, every huge
 * page obtained so far is freed and -ENOMEM is returned.
 */
static int devmm_alloc_huge_pages(struct devmm_phy_addr_attr *attr,
    int nids[], u32 nid_num, struct page **hpages, u64 pg_num)
{
    int latest_nid = nids[0];    /* alloc from latest nid to approve performance */
    u64 done;

    for (done = 0; done < pg_num; done++) {
        hpages[done] = devmm_alloc_huge_page(attr, &latest_nid, nids, nid_num);
        if (hpages[done] == NULL) {
            devmm_free_huge_pages(attr, hpages, done);
            return -ENOMEM;
        }
    }

    return 0;
}

/*
 * Entry point: allocate @pg_num pages (huge or normal per @attr) from
 * the NUMA nodes backing this device/VF memory type.  On failure the
 * per-node memory status is dumped to aid diagnosis.
 *
 * Returns 0 on success, a negative errno otherwise.
 */
int devmm_alloc_pages(struct devmm_phy_addr_attr *attr, struct page **pages, u64 pg_num)
{
    int nids[DEVMM_MAX_NUMA_NUM_OF_PER_DEV] = {0};
    u32 nid_num = DEVMM_MAX_NUMA_NUM_OF_PER_DEV;
    int rc;

    rc = devmm_get_nids(attr->devid, attr->vfid, attr->mem_type, nids, &nid_num);
    if (rc != 0) {
        devmm_drv_err("Get nids failed. (ret=%d; devid=%u; vfid=%u; mem_type=%u)\n",
            rc, attr->devid, attr->vfid, attr->mem_type);
        return rc;
    }

    rc = (attr->pg_type == DEVMM_HUGE_PAGE_TYPE) ?
        devmm_alloc_huge_pages(attr, nids, nid_num, pages, pg_num) :
        devmm_alloc_normal_pages(attr, nids, nid_num, pages, pg_num);
    if (rc != 0) {
        devmm_print_nodes_info(attr->devid, attr->vfid, attr->mem_type);
    }

    return rc;
}

/* Public helper: drop one reference on a normal page (scrubs it when last). */
void devmm_put_normal_page(struct page *pg)
{
    devmm_page_ref_dec(pg, PAGE_SIZE, put_page);
}

/* Public helper: drop one reference on a huge page (scrubs it when last). */
void devmm_put_huge_page(struct page *hpage)
{
    devmm_page_ref_dec(hpage, HPAGE_SIZE, put_page);
}
