/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2024-2024.
 * Description: CMA-based enhanced interface
 * Author: heyuqiang
 * Create: 2024-05-10
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/cma.h>
#include <linux/crc32.h>
#include <linux/rtos_mem_snapshot.h>
#include "cma.h"

struct new_alloc_mem {
	unsigned long pfn_start;
	unsigned long count;
	struct list_head list;
};

/*
 * Reset the persistent CMA header to a known-clean state.
 *
 * Zeroes the header page plus the bitmap stored directly behind it
 * (cma->bitmap points into this region, so the bitmap is cleared by the
 * memset as well), then re-seeds both CRCs from the now-empty bitmap and
 * the current data pages, and marks the block invalid.
 *
 * NOTE(review): crc32_page here accumulates over EVERY data page, while
 * check_persist_mem_block() only accumulates pages whose bitmap bit is
 * set. After this reset the bitmap is all-zero, so the next boot's check
 * computes crc32_page == 0 — confirm the two sides are meant to agree.
 */
static void clear_cma_head(struct cma *cma, struct persist_mem_block *head_orig)
{
	uint8_t *data_addr;
	int i;

	/* Wipe the one-page header and the bitmap that follows it. */
	memset(head_orig, 0, (PAGE_SIZE + cma->bitmap_size));

	/* CRC of the (just-zeroed) bitmap. */
	head_orig->crc32_bitmap = crc32(0, (const uint8_t *)cma->bitmap, cma->bitmap_size);
	/* Data pages start right after the header page + bitmap. */
	data_addr = (uint8_t *)head_orig + (PAGE_SIZE + cma->bitmap_size);
	for (i = CMA_HEADER_PFN_START; i < cma->count; i++) {
		head_orig->crc32_page = crc32(head_orig->crc32_page, (const uint8_t *)data_addr, PAGE_SIZE);
		data_addr += PAGE_SIZE;
	}
	head_orig->block_valid = CMA_BLOCK_INVALID;
}

/*
 * Validate the persistent CMA state left over from the previous boot.
 *
 * Marks the block valid, then verifies two CRCs stored in the header:
 * first the bitmap CRC, then a CRC accumulated over every data page whose
 * bitmap bit is set. Any mismatch — or a pending reset_on_reboot request
 * from the previous run — resets the whole area via clear_cma_head().
 *
 * NOTE(review): the header lives in the persisted region itself, so the
 * reset_on_reboot flag written here is presumably seen on the next boot —
 * confirm write-back/flush semantics for this memory.
 */
static void check_persist_mem_block(struct cma *cma, struct persist_mem_block *head_orig)
{
	int i;
	unsigned int crc32_bitmap = 0;
	unsigned int crc32_page = 0;
	unsigned long data_addr;

	head_orig->block_valid = CMA_BLOCK_VALID;

	/* A reset was requested before the reboot: wipe and start clean. */
	if (head_orig->reset_on_reboot) {
		clear_cma_head(cma, head_orig);
		head_orig->reset_on_reboot = false;
		return;
	}

	/* Stage 1: the bitmap itself must match its recorded CRC. */
	crc32_bitmap = crc32(0, (const uint8_t *)cma->bitmap, cma->bitmap_size);
	if (crc32_bitmap != head_orig->crc32_bitmap) {
		pr_info("%s():bitmap invalid, bitmap crc32[%x] err, expect[%x]\n",
			__func__, head_orig->crc32_bitmap, crc32_bitmap);
		head_orig->reset_on_reboot = true;
		goto clear;
	}
	/* Check the validity of the CMA data area. */
	data_addr = (uintptr_t)head_orig + (PAGE_SIZE + cma->bitmap_size);

	/* Stage 2: CRC only the pages the bitmap marks as allocated. */
	for (i = CMA_HEADER_PFN_START; i < cma->count; i++) {
		if (bitmap_get_bitvalue(cma->bitmap, i))
			crc32_page = crc32(crc32_page, (const uint8_t *)data_addr, PAGE_SIZE);
		data_addr += PAGE_SIZE;
	}

	if (crc32_page != head_orig->crc32_page) {
		pr_info("%s():page invalid, page crc32[%x] err, expect[%x]\n",
			__func__, head_orig->crc32_page, crc32_page);
		head_orig->reset_on_reboot = true;
	}
clear:
	/* Either failure path lands here: wipe and clear the request flag. */
	if (head_orig->reset_on_reboot) {
		clear_cma_head(cma, head_orig);
		head_orig->reset_on_reboot = false;
	}
}

/*
 * Activate a persistence-enabled CMA area.
 *
 * Maps the reserved range, locates the persistent header (one page) and
 * the allocation bitmap right behind it, validates the state saved by a
 * previous boot via check_persist_mem_block(), and hands every pageblock
 * plus its bitmap slice to rtos_init_cma_reserved_pageblock().
 *
 * On failure the area is deactivated by forcing cma->count to 0.
 */
void __init rtos_cma_activate_area(struct cma *cma)
{
	unsigned long base_pfn, pfn, i;
	struct zone *zone;

	if (!cma) {
		/*
		 * Must not jump to out_error here: that path dereferences
		 * cma->count and cma->name, which would be a NULL deref.
		 */
		pr_err("%s():Error: NULL CMA area could not be activated\n", __func__);
		return;
	}
	base_pfn = cma->base_pfn;
	pfn = base_pfn;
	i = cma->count >> pageblock_order;
	/* The do/while below decrements before testing; i == 0 would wrap. */
	if (!i)
		goto out_error;
	cma->block = phys_to_virt(PFN_PHYS(base_pfn));
	if (!cma->block)
		goto out_error;
	/* Persistent bitmap lives right after the one-page header. */
	cma->bitmap = (void *)cma->block + PAGE_SIZE;
	/* 8 * PAGE_SIZE pages need 1 page of bitmap, i.e. 8 pages per byte. */
	cma->bitmap_size = ALIGN(cma->count, BITS_PER_BYTE * PAGE_SIZE) / (BITS_PER_BYTE);

	check_persist_mem_block(cma, cma->block);

	/* Header + bitmap pages are never handed out; mark them allocated. */
	bitmap_set(cma->bitmap, 0, CMA_HEADER_PFN_START);

	WARN_ON_ONCE(!pfn_valid(pfn));
	zone = page_zone(pfn_to_page(pfn));

	do {
		unsigned long j;
		unsigned long *bitmap;

		base_pfn = pfn;
		bitmap = cma->bitmap + (base_pfn - cma->base_pfn) / (sizeof(unsigned long) * BITS_PER_BYTE);
		/*
		 * cma->bitmap + bitmap_size is one past the last valid byte,
		 * so reaching it (>=, not >) is already out of bounds.
		 */
		if ((uintptr_t)bitmap >= (uintptr_t)cma->bitmap + cma->bitmap_size) {
			pr_err("%s():error bitmap out of the boundary\n", __func__);
			goto out_error;
		}
		for (j = pageblock_nr_pages; j; --j, pfn++) {
			WARN_ON_ONCE(!pfn_valid(pfn));
			/*
			 * alloc_contig_range requires the pfn range
			 * specified to be in the same zone. Make this
			 * simple by forcing the entire CMA resv range
			 * to be in the same zone.
			 */
			if (page_zone(pfn_to_page(pfn)) != zone)
				goto out_error;
		}
		rtos_init_cma_reserved_pageblock(pfn_to_page(base_pfn), bitmap);
	} while (--i);

	spin_lock_init(&cma->lock);

#ifdef CONFIG_CMA_DEBUGFS
	INIT_HLIST_HEAD(&cma->mem_head);
	spin_lock_init(&cma->mem_head_lock);
#endif

	return;

out_error:
	cma->count = 0;
	pr_err("%s():Error: CMA area %s could not be activated\n", __func__, cma->name);
}

/*
 * Release @count pages starting at @pages back to the CMA area.
 *
 * Verifies that the whole range lies inside the data region (past the
 * persistent header pages) and that every page in the range is still
 * marked allocated in the bitmap before releasing it.
 *
 * Return: 0 on success, -EINVAL on bad arguments or release failure,
 *         -EBUSY if part of the range was already freed.
 */
int cma_release_from_count(struct cma *cma, const struct page *pages, unsigned int count)
{
	unsigned long pfn;
	unsigned long start, end, next_zero_bit;

	/*
	 * count == 0 would otherwise slip through the range checks and
	 * reach rtos_cma_release() with an empty range.
	 */
	if (!cma || !pages || !count)
		return -EINVAL;

	pfn = page_to_pfn(pages);
	/* Range must stay within the data area, after the header pages. */
	if (pfn < (cma->base_pfn + CMA_HEADER_PFN_START) ||
		pfn >= cma->base_pfn + cma->count ||
		pfn + count > cma->base_pfn + cma->count)
		return -EINVAL;

	start = pfn - cma->base_pfn;
	end = start + count;

	/* Every bit in [start, end) must still be set, i.e. still allocated. */
	next_zero_bit = find_next_zero_bit(cma->bitmap, end, start);
	if (next_zero_bit < end) {
		pr_err("%s():error 0x%lx pfn has been released!\n",
			__func__, cma->base_pfn + next_zero_bit);
		return -EBUSY;
	}

	if (!rtos_cma_release(cma, pages, count))
		return -EINVAL;

	return 0;
}

/*
 * Record a freshly allocated pfn range on @head so it can be rolled back
 * (failed_release_new_alloc) or forgotten (success_release_list) later.
 *
 * Return: 0 on success, -ENOMEM if the tracking node cannot be allocated.
 * Caller owns the list and must drain it with one of the release helpers.
 */
int add_new_alloc(struct list_head *head, unsigned long pfn_start, unsigned long count)
{
	struct new_alloc_mem *node;

	/* sizeof(*node) stays correct even if the struct type is renamed. */
	node = kmalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;
	node->pfn_start = pfn_start;
	node->count = count;
	list_add(&node->list, head);
	return 0;
}

/*
 * Roll back every allocation tracked on @head: give each recorded pfn
 * range back to @cma, then unlink and free the tracking node. The list
 * is empty on return; release failures are logged but do not stop the
 * rollback.
 */
void failed_release_new_alloc(struct cma *cma, struct list_head *head)
{
	struct new_alloc_mem *cur;
	struct new_alloc_mem *next;

	list_for_each_entry_safe(cur, next, head, list) {
		struct page *first = pfn_to_page(cur->pfn_start);

		if (!rtos_cma_release(cma, first, cur->count))
			pr_err("%s():cma_release failed\n", __func__);
		list_del(&cur->list);
		kfree(cur);
	}
}

/*
 * Drop the bookkeeping for a successful allocation batch: unlink and
 * free every tracking node on @head without touching the CMA pages
 * themselves. The list is empty on return.
 */
void success_release_list(struct list_head *head)
{
	struct new_alloc_mem *cur;
	struct new_alloc_mem *next;

	list_for_each_entry_safe(cur, next, head, list) {
		list_del(&cur->list);
		kfree(cur);
	}
}
