/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2022-2022. All rights reserved.
 * Description: To improve the compression ratio of squashfs, a new user-mode tool (mksquashfsex) modifies
 * the compression layout to create a new file system, ext-squashfs.
 * This file adds support for interpreting ext-squashfs to the kernel squashfs module.
 * Author: wangzirui
 * Create: 2022-08-05
 */
#include <linux/blkdev.h>
#include <linux/fs.h>
#include <linux/vfs.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/bio.h>
#include <linux/path.h>
#include <linux/backing-dev.h>
#include <linux/proc_fs.h>
#include <linux/uaccess.h>
#include <linux/string.h>
#include <linux/crc32.h>

#include "squashfs_ex.h"
#include "squashfs_fs.h"
#include "squashfs_fs_i.h"
#include "squashfs_fs_sb.h"
#include "squashfs.h"
#include "page_actor.h"
#include "decompressor.h"

/*
 * Global bookkeeping for the "ex_data" read cache size.
 * NOTE(review): g_cache_ctx.lock is never initialized in this file;
 * confirm it is rwlock_init()'d (or declared __RW_LOCK_UNLOCKED) in
 * module-init code elsewhere — zero-initialization alone is not a
 * valid rwlock on all configurations.
 */
struct cache_ctx {
	bool changed;			/* presumably set via an external (proc?) interface; only read here */
	unsigned int block_num;		/* requested/current number of cache entries */
	rwlock_t lock;			/* protects all fields of this struct */
	unsigned int max_block_num;	/* largest entry count recorded by update_cache_info() */
} g_cache_ctx;

/* /proc entries for ex-squashfs; not referenced in the code visible here. */
struct proc_ctx {
	struct proc_dir_entry *parent;	/* parent /proc directory entry */
	struct proc_dir_entry *file;	/* file entry under @parent */
} g_proc_ctx;

/* True when @block_info marks the block as a small block. */
static bool ex_block_small(u32 block_info)
{
	return (block_info & SQUASHFS_EX_SMALL_BLOCK_BIT) != 0;
}

/* True when @block_info marks the block's payload as compressed. */
static bool ex_block_compressed(u32 block_info)
{
	return (block_info & SQUASHFS_EX_COMPRESSED_BIT) != 0;
}

/* On-disk byte length of a block: the low 30 bits of @block_info (the top two bits are flags). */
static u32 ex_block_length(u32 block_info)
{
	return block_info & 0x3FFFFFFFU;
}

static int page_reader_list_size(struct list_head *list)
{
	int size = 0;
	struct page_reader *reader = NULL;

	list_for_each_entry(reader, list, node_handle) {
		++size;
	}

	return size;
}

static struct page_reader *page_reader_list_pop(struct list_head *list)
{
	struct page_reader *reader = list_first_entry_or_null(list,
		struct page_reader, node_handle);

	if (reader)
		list_del_init(&reader->node_handle);

	return reader;
}

/*
 * Remove @node from @list, free it, and return the reader that followed
 * it, or NULL when @node was the tail (so callers stop iterating).
 *
 * Checking list_is_last() BEFORE calling list_next_entry() matters:
 * list_next_entry() on the tail fabricates a pointer out of the list
 * head, and the previous code returned that bogus pointer whenever the
 * erased node was last while other nodes remained on the list.
 */
static struct page_reader *page_reader_list_erase(struct list_head *list, struct page_reader *node)
{
	struct page_reader *next = NULL;

	if (!list_is_last(&node->node_handle, list))
		next = list_next_entry(node, node_handle);

	list_del_init(&node->node_handle);
	kfree(node);
	return next;
}

/* Free every page_reader on @reader_list, then the list head itself. */
static void page_reader_list_free(struct list_head *reader_list)
{
	struct page_reader *node = NULL;

	if (reader_list == NULL)
		return;

	for (node = page_reader_list_pop(reader_list); node != NULL;
	     node = page_reader_list_pop(reader_list))
		kfree(node);

	kfree(reader_list);
}

/*
 * Find the index of the chunk containing absolute file offset *fpos.
 * On return, *fpos is rewritten to the offset inside that chunk.
 * Returns chunk_info->count when the offset lies beyond the last chunk.
 */
static u32 indicate_target_chunk(struct chunk_info *chunk_info, u64 *fpos)
{
	u32 idx;

	for (idx = 0; idx < chunk_info->count; ++idx) {
		const struct chunk *cur = &chunk_info->chunks[idx];

		if (*fpos < (u64)cur->size)
			break;
		*fpos -= cur->size;
	}

	return idx;
}

/*
 * Describe one page of file data as a sequence of page_reader nodes.
 *
 * Walks the chunks starting at the chunk containing byte offset
 * page_index * PAGE_SIZE and appends one reader per (page, chunk)
 * intersection to @reader_list, until PAGE_SIZE bytes are covered or
 * the chunks run out (a short final page).
 *
 * Returns 0 on success or -ENOMEM; readers already appended stay on
 * @reader_list for the caller to free.
 */
static int page_reader_fetch(struct list_head *reader_list, struct ex_super_block_info *ex_sb_info,
	struct chunk_info *chunk_info, pgoff_t page_index)
{
	struct page_reader *reader = NULL;
	struct chunk *chunk = NULL;
	u32 remaining = PAGE_SIZE;
	u64 fpos = page_index * PAGE_SIZE;
	/* after this call, fpos is the offset INSIDE the target chunk */
	u32 target_chunk_index = indicate_target_chunk(chunk_info, &fpos);

	while (target_chunk_index < chunk_info->count && remaining > 0) {
		chunk = &chunk_info->chunks[target_chunk_index];

		reader = kzalloc(sizeof(struct page_reader), GFP_KERNEL);
		if (!reader) {
			ERROR("page reader alloc failed\n");
			return -ENOMEM;
		}

		reader->page_index = page_index;
		reader->page_offset = PAGE_SIZE - remaining;
		reader->block_id = chunk->block;
		reader->block_addr = ex_sb_info->block_addr_list[reader->block_id];
		reader->block_info = ex_sb_info->block_info_list[reader->block_id];
		reader->block_offset = chunk->offset + fpos;

		if (chunk->size - fpos > remaining) {
			/* this chunk satisfies the rest of the page */
			reader->read_size = remaining;
			/* remaining becomes 0 below, ending the loop; fpos is not read again */
			fpos = chunk->size - remaining;
		} else {
			/* the page continues into the next chunk */
			reader->read_size = chunk->size - fpos;
			++target_chunk_index;
			fpos = 0;
		}
		remaining -= reader->read_size;

		list_add_tail(&reader->node_handle, reader_list);
	}

	return 0;
}

/*
 * Decide whether @cur is the last reader that writes into its page.
 *
 * True when the page is fully covered by @cur, when @cur is the final
 * reader on the list, or when no OTHER reader for the same page draws
 * from the same block as @cur (readers from other blocks will be
 * satisfied in a later pass, so this block's work on the page is done).
 */
static bool page_reader_done(struct list_head *reader_list, struct page_reader *cur)
{
	struct list_head *handle = NULL;

	if (WARN(list_empty(reader_list), "Reader list empty\n"))
		return true;

	if (cur->page_offset + cur->read_size == PAGE_SIZE ||
	    cur == list_last_entry(reader_list, struct page_reader, node_handle))
		return true;

	list_for_each(handle, reader_list) {
		struct page_reader *reader = list_entry(handle, struct page_reader, node_handle);
		if (reader == cur)
			continue;

		if (reader->page_index == cur->page_index) {
			/* same page, same block: more copying from this block remains */
			if (reader->block_id != cur->block_id)
				continue;
			else
				return false;
		}
	}

	return true;
}

/*
 * First page index whose data begins inside @block_index.
 * (@ex_sb_info is unused but kept for signature symmetry with the
 * end-index helper.)
 */
static pgoff_t page_reader_get_start_page_index(struct ex_super_block_info *ex_sb_info, struct chunk_info *chunk_info,
	 u32 block_index)
{
	u64 fpos = 0;
	u32 idx;

	/* Sum chunk sizes preceding the first chunk stored in @block_index. */
	for (idx = 0; idx < chunk_info->count; ++idx) {
		if (chunk_info->chunks[idx].block == block_index)
			break;
		fpos += chunk_info->chunks[idx].size;
	}

	/* Round up: a partially-covered page belongs to the previous block. */
	return DIV_ROUND_UP(fpos, PAGE_SIZE);
}

/*
 * Last page index whose data begins inside @block_index, scanning the
 * chunk table backwards from @file_size.
 * (@ex_sb_info is unused but kept for signature symmetry.)
 */
static pgoff_t page_reader_get_end_page_index(struct ex_super_block_info *ex_sb_info, struct chunk_info *chunk_info,
	 u32 block_index, loff_t file_size)
{
	u64 fpos = file_size;
	int idx;

	/* Peel trailing chunk sizes until the last chunk in @block_index. */
	for (idx = chunk_info->count - 1; idx >= 0; --idx) {
		if (chunk_info->chunks[idx].block == block_index)
			break;
		fpos -= chunk_info->chunks[idx].size;
	}

	/* ceil(fpos / PAGE_SIZE) - 1: last page starting before @fpos. */
	return DIV_ROUND_UP(fpos, PAGE_SIZE) - 1;
}

/*
 * Build the reader list for @page_index, then extend it with readers
 * for every sibling page backed by the same first/last blocks, so a
 * decompressed block can populate all pages it covers in one pass.
 *
 * Returns the allocated list (possibly empty) or NULL on failure.
 * Caller owns the list and must release it via page_reader_list_free()
 * (or by consuming it in readpage_from_block()).
 */
static struct list_head *generate_page_reader_list(struct ex_super_block_info *ex_sb_info,
	struct chunk_info *chunk_info, u64 page_index, loff_t file_size)
{
	int res;
	pgoff_t start_page_index;
	pgoff_t end_page_index;
	pgoff_t index;
	struct list_head *reader_list = kzalloc(sizeof(struct list_head), GFP_KERNEL);
	if (!reader_list) {
		ERROR("failed to alloc reader list\n");
		return NULL;
	}

	INIT_LIST_HEAD(reader_list);

	res = page_reader_fetch(reader_list, ex_sb_info, chunk_info, page_index);
	if (res != 0) {
		ERROR("page_reader_fetch error 1\n");
		page_reader_list_free(reader_list);
		return NULL;
	}

	/* page past EOF: nothing to read, return the empty list */
	if (list_empty(reader_list))
		return reader_list;

	/* range of pages sharing blocks with the requested page */
	start_page_index = page_reader_get_start_page_index(ex_sb_info, chunk_info,
		list_first_entry(reader_list, struct page_reader, node_handle)->block_id);
	end_page_index = page_reader_get_end_page_index(ex_sb_info, chunk_info,
		list_last_entry(reader_list, struct page_reader, node_handle)->block_id, file_size);
	for (index = start_page_index; index <= end_page_index; ++index) {
		if (index == page_index)
			continue;

		res = page_reader_fetch(reader_list, ex_sb_info, chunk_info, index);
		if (res != 0) {
			ERROR("page_reader_fetch error 2\n");
			page_reader_list_free(reader_list);
			return NULL;
		}
	}

	return reader_list;
}

/*
 * Read the raw chunk table and parse it.
 * Note that sb->s_fs_info->ex_sb_info is not populated yet,
 * so it is passed as a separate parameter.
 */
/*
 * Read the on-disk chunk table and convert it to host endianness.
 *
 * Validates that the table is monotonically non-decreasing and that
 * every entry is smaller than chunks_num. Returns the converted table
 * (caller frees) or an ERR_PTR.
 */
static u32 *read_chunk_table(struct super_block *sb,
			     struct ex_super_block_info *ex_sb_info)
{
	__le32 *raw_table = NULL;
	u32 *chunk_table = NULL;
	u32 i;
	u64 byte_length = ex_sb_info->chunk_table_size * sizeof(__le32);

	raw_table = squashfs_read_table(sb, ex_sb_info->chunk_table_start,
		byte_length);
	if (IS_ERR(raw_table))
		return ERR_CAST(raw_table);	/* don't return __le32 * as u32 * */

	chunk_table = kmalloc(byte_length, GFP_KERNEL);
	if (!chunk_table) {
		kfree(raw_table);
		return ERR_PTR(-ENOMEM);
	}
	for (i = 0; i < ex_sb_info->chunk_table_size; i++) {
		chunk_table[i] = le32_to_cpu(raw_table[i]);

		/*
		 * Chunk table items should always be monotonically increasing,
		 * and smaller than chunks_num.
		 */
		if (chunk_table[i] >= ex_sb_info->chunks_num ||
		    (i != 0 && chunk_table[i] < chunk_table[i - 1])) {
			ERROR("Chunk table malformed.\n");
			kfree(raw_table);
			kfree(chunk_table);
			return ERR_PTR(-EINVAL);
		}
	}

	kfree(raw_table);
	return chunk_table;
}

/*
 * Release all resources held by @ex_sb_info, then the struct itself.
 * Safe to call with NULL or with a partially-initialized struct
 * (unset members are NULL from kzalloc).
 *
 * kfree(NULL) is a no-op, so the previous per-pointer NULL guards and
 * the field NULL-ing (the struct is freed immediately after) were
 * redundant and have been dropped.
 */
void squashfs_ex_put_super(struct ex_super_block_info *ex_sb_info)
{
	if (ex_sb_info == NULL)
		return;

	if (ex_sb_info->read_page)
		squashfs_cache_delete(ex_sb_info->read_page);

	if (ex_sb_info->small_read_page)
		squashfs_cache_delete(ex_sb_info->small_read_page);

	kfree(ex_sb_info->block_info_list);
	kfree(ex_sb_info->block_addr_list);
	kfree(ex_sb_info->chunk_table);
	kfree(ex_sb_info);
}

/*
 * Decode the on-disk (little-endian) block info table into
 * ex_sb_info->block_info_list and compute every block's absolute start
 * address into ex_sb_info->block_addr_list.
 *
 * @raw_block_info_list: raw table read from the image.
 * @offset_max: image size; no block may extend past it.
 *
 * Returns 0 or a negative errno. On failure the partially-filled
 * arrays remain in ex_sb_info and are freed by squashfs_ex_put_super().
 */
static int parse_raw_block_info(struct ex_super_block_info *ex_sb_info,
				__le32 *raw_block_info_list,
				u64 offset_max)
{
	u64 i;

	/* kcalloc() checks the count * size multiplication for overflow;
	 * blocks comes straight from the (untrusted) image. */
	ex_sb_info->block_addr_list = kcalloc(ex_sb_info->blocks, sizeof(u64), GFP_KERNEL);
	if (!ex_sb_info->block_addr_list) {
		ERROR("kcalloc failed, blocks:%llu\n", ex_sb_info->blocks);
		return -ENOMEM;
	}

	ex_sb_info->block_info_list = kcalloc(ex_sb_info->blocks, sizeof(u32), GFP_KERNEL);
	if (!ex_sb_info->block_info_list) {
		ERROR("kcalloc failed, blocks:%llu\n", ex_sb_info->blocks);
		return -ENOMEM;
	}

	ex_sb_info->block_addr_list[0] = ex_sb_info->blocks_start;

	for (i = 0; i < ex_sb_info->blocks; ++i) {
		u64 block_end;
		u32 block_info = le32_to_cpu(raw_block_info_list[i]);

		if (ex_block_length(block_info) > ex_sb_info->block_size) {
			ERROR("Invalid block size.\n");
			return -EINVAL;
		}

		ex_sb_info->block_info_list[i] = block_info;

		/*
		 * Compute this block's start address BEFORE validating its
		 * end offset. The previous code checked block_addr_list[i]
		 * while it was still zero for i > 0, so blocks past the
		 * first were effectively never range-checked.
		 */
		if (i > 0)
			ex_sb_info->block_addr_list[i] = ex_sb_info->block_addr_list[i - 1] +
				ex_block_length(ex_sb_info->block_info_list[i - 1]);

		block_end = ex_sb_info->block_addr_list[i] + ex_block_length(block_info);
		if (block_end > offset_max) {
			ERROR("Block content out of archive range.\n");
			return -EINVAL;
		}
	}
	return 0;
}

/*
 * Number of cache entries to allocate for the data-block cache:
 * one per block of the image, at least 1, capped by
 * SQUASHFS_EX_CACHE_MAX bytes overall.
 */
static int squashfs_ex_get_cache_num(struct inode *bd_inode, u32 block_size)
{
	loff_t image_size = i_size_read(bd_inode);
	u64 entries;

	if (image_size >= SQUASHFS_EX_CACHE_MAX) {
		entries = SQUASHFS_EX_CACHE_MAX / block_size;
	} else {
		entries = image_size / block_size;
		if (entries == 0)
			entries = 1;
	}

	return (int)entries;
}

/* Record @block_num in g_cache_ctx if it exceeds the current value. */
static void update_cache_info(unsigned int block_num)
{
	write_lock(&g_cache_ctx.lock);
	if (g_cache_ctx.block_num < block_num) {
		g_cache_ctx.block_num = block_num;
		g_cache_ctx.max_block_num = block_num;
	}
	write_unlock(&g_cache_ctx.lock);
}

/*
 * Read and validate the ex-squashfs super block at @ex_sblk_start.
 *
 * Checks the magic number and the CRC32 of the struct (the checksum
 * field is zeroed before recomputing — presumably matching how the
 * mksquashfsex writer computed it; confirm against the tool).
 *
 * Returns the super block (caller frees) or an ERR_PTR.
 */
static struct ex_super_block *fill_super_init(struct super_block *sb,
					      u64 ex_sblk_start)
{
	struct ex_super_block *ex_sblk = NULL;
	u32 checksum, read_checksum;

	ex_sblk = squashfs_read_table(sb, ex_sblk_start, sizeof(*ex_sblk));
	if (IS_ERR(ex_sblk)) {
		ERROR("failed to read squashfs_ex_super_block\n");
		return ex_sblk;
	}

	if (le64_to_cpu(ex_sblk->magic) != EXSQUASHFS_MAGIC) {
		ERROR("Bad ex-squashfs magic number.\n");
		kfree(ex_sblk);
		return ERR_PTR(-EINVAL);
	}

	/* CRC is computed over the struct with its checksum field zeroed. */
	read_checksum = le32_to_cpu(ex_sblk->checksum);
	ex_sblk->checksum = 0;
	checksum = crc32(0, ex_sblk, sizeof(*ex_sblk));
	if (read_checksum != checksum) {
		ERROR("CRC32 failed: expected 0x%x, get 0x%x\n", checksum,
			read_checksum);
		kfree(ex_sblk);
		return ERR_PTR(-EINVAL);
	}

	TRACE("squashfs_ex_super_block info: "
		"block_size_log2:%u, small_block_size_log2:%u, "
		"compression:%u, flags:%u, major:%u, minor:%u, "
		"chunk_table_start:%llx, chunks_start:%llx,"
		"block_start:%llx, blocks:%llx\n",
		le16_to_cpu(ex_sblk->block_size_log2),
		le16_to_cpu(ex_sblk->small_block_size_log2),
		le16_to_cpu(ex_sblk->compression),
		le16_to_cpu(ex_sblk->flags),
		le16_to_cpu(ex_sblk->s_major),
		le16_to_cpu(ex_sblk->s_minor),
		le64_to_cpu(ex_sblk->chunk_table_start),
		le64_to_cpu(ex_sblk->chunks_start),
		le64_to_cpu(ex_sblk->blocks_start),
		le64_to_cpu(ex_sblk->blocks));

	return ex_sblk;
}

/*
 * Super-block setup for images reporting zero data blocks
 * (ex_sblk->blocks == 0): validate that the layout is consistent with
 * "no ex-format file data" and, if a chunk table exists, that it is
 * entirely zero.
 *
 * Returns 0 on success or a negative errno; anything read into
 * @ex_sb_info is released later by squashfs_ex_put_super().
 */
static int fill_super_ex_no_data(struct super_block *sb,
			  const struct ex_super_block *ex_sblk,
			  struct ex_super_block_info *ex_sb_info)
{
	u32 chunk_table_iter;
	struct squashfs_sb_info *msblk = sb->s_fs_info;

	/*
	 * All file data are within old squashfs format, and there is
	 * zero-lengthed file or no file at all thus no cache,
	 * no block_info, no block_addr, no chunk, no read_page, no
	 * small_read_page, no chunk in inode, but maybe chunk table.
	 * We will validate if this is true.
	 *
	 * All absent field were marked NULL by kzalloc, so we won't
	 * touch it here.
	 */

	ex_sb_info->chunk_table_start = le64_to_cpu(ex_sblk->chunk_table_start);
	ex_sb_info->chunks_start = le64_to_cpu(ex_sblk->chunks_start);
	ex_sb_info->blocks_start = le64_to_cpu(ex_sblk->blocks_start);

	/* note: msblk->bytes_used is the whole device size here (see squashfs_ex_fill_super) */
	if (ex_sb_info->chunk_table_start >= msblk->bytes_used ||
		ex_sb_info->chunks_start >= msblk->bytes_used ||
		ex_sb_info->blocks_start >= msblk->bytes_used) {
		ERROR("Image segment start address out of bound.\n");
		return -EINVAL;
	}

	/* no blocks implies a zero-length chunk section */
	if (ex_sb_info->chunks_start != ex_sb_info->blocks_start) {
		ERROR("Image reported no datablock, but chunks were found.\n");
		return -EINVAL;
	}

	if (ex_sb_info->chunk_table_start > ex_sb_info->chunks_start) {
		ERROR("Bad segment layout.\n");
		return -EINVAL;
	}
	ex_sb_info->chunk_table_size = (ex_sb_info->chunks_start -
		ex_sb_info->chunk_table_start) / sizeof(__le32) - 1;

	/* No chunk table: No file in ex_squashfs format */
	if (ex_sb_info->chunk_table_size == 0)
		return 0;

	/*
	 * There is indeed chunk table, but it should be all zero since
	 * there is only zero-lengthed files. Validate this.
	 * Since __le16(0) == __be16(0) == 0, there is no point
	 * converting endianness.
	 */
	ex_sb_info->chunk_table = squashfs_read_table(sb, ex_sb_info->chunk_table_start,
		ex_sb_info->chunk_table_size * sizeof(__le32));
	if (IS_ERR(ex_sb_info->chunk_table)) {
		int err = PTR_ERR(ex_sb_info->chunk_table);

		ERROR("squashfs_read_table failed with %d.\n", err);
		ex_sb_info->chunk_table = NULL;
		return err;
	}
	for (chunk_table_iter = 0;
		chunk_table_iter < ex_sb_info->chunk_table_size;
		chunk_table_iter++) {
		if (ex_sb_info->chunk_table[chunk_table_iter] != 0) {
			ERROR("Empty image's chunk_table should be all zero.\n");
			return -EINVAL;
		}
	}

	return 0;
}

/*
 * Parse block geometry from @ex_sblk into @ex_sb_info: block sizes,
 * block section start, and per-block address/info tables.
 *
 * Requires ex_sb_info->flags to be set by the caller beforehand.
 * Delegates to fill_super_ex_no_data() for images without data blocks.
 *
 * NOTE(review): blocks * sizeof(__le32) below is not overflow-checked
 * here; presumably squashfs_read_table() rejects oversized reads —
 * confirm.
 *
 * Returns 0 or a negative errno.
 */
static int fill_super_block_info(struct super_block *sb, u64 ex_sblk_start,
				 const struct ex_super_block *ex_sblk,
				 struct ex_super_block_info *ex_sb_info)
{
	u16 block_size_log2;
	u16 small_block_size_log2;
	struct squashfs_sb_info *msblk = sb->s_fs_info;
	__le32 *raw_block_info_list = NULL;
	int parse_res;

	block_size_log2 = le16_to_cpu(ex_sblk->block_size_log2);
	if (block_size_log2 > EX_MAX_BLOCK_LOG ||
	    block_size_log2 < EX_MIN_BLOCK_LOG) {
		ERROR("Block size out of range.\n");
		return -EINVAL;
	}
	ex_sb_info->block_size = 1U << block_size_log2;

	/* the small-block size only exists when the image was built with it */
	if (ex_sb_info->flags & SQUASHFS_EX_SMALL_BLOCK_FLAG) {
		small_block_size_log2 = le16_to_cpu(ex_sblk->small_block_size_log2);
		if (small_block_size_log2 > EX_MAX_SMALL_BLOCK_LOG ||
		    small_block_size_log2 < EX_MIN_SMALL_BLOCK_LOG) {
			ERROR("Block size out of range.\n");
			return -EINVAL;
		}
		ex_sb_info->small_block_size = 1U << small_block_size_log2;
	}

	ex_sb_info->blocks_start = le64_to_cpu(ex_sblk->blocks_start);
	if (ex_sb_info->blocks_start > msblk->bytes_used) {
		ERROR("Block section offset out of range");
		return -EINVAL;
	}
	ex_sb_info->blocks = le64_to_cpu(ex_sblk->blocks);

	if (ex_sb_info->blocks == 0)
		return fill_super_ex_no_data(sb, ex_sblk, ex_sb_info);

	/* raw block info table sits immediately after the super block */
	raw_block_info_list = squashfs_read_table(sb, ex_sblk_start + sizeof(*ex_sblk),
		ex_sb_info->blocks * sizeof(__le32));
	if (IS_ERR(raw_block_info_list)) {
		ERROR("failed to read block size list\n");
		return PTR_ERR(raw_block_info_list);
	}

	parse_res = parse_raw_block_info(ex_sb_info, raw_block_info_list, msblk->bytes_used);

	kfree(raw_block_info_list);
	raw_block_info_list = NULL;

	return parse_res;
}

/*
 * Create the data-block cache (and, when the image uses small blocks,
 * the small-block cache). Caches created here are torn down by
 * squashfs_ex_put_super() on any later failure.
 */
static int fill_super_caches(struct super_block *sb,
			     const struct ex_super_block *ex_sblk,
			     struct ex_super_block_info *ex_sb_info)
{
	struct squashfs_sb_info *msblk = sb->s_fs_info;
	int cache_num = squashfs_ex_get_cache_num(sb->s_bdev->bd_inode, ex_sb_info->block_size);

	if (cache_num < 0) {
		ERROR("Bad cache num config.\n");
		return cache_num;
	}

	ex_sb_info->read_page = squashfs_cache_init("ex_data", cache_num,
		ex_sb_info->block_size);
	if (!ex_sb_info->read_page) {
		ERROR("Failed to init read_page cache\n");
		return -ENOMEM;
	}
	spin_lock_init(&ex_sb_info->read_page_lock);
	update_cache_info(ex_sb_info->read_page->entries);

	/* only images built with small blocks carry a second cache */
	if (!(ex_sb_info->flags & SQUASHFS_EX_SMALL_BLOCK_FLAG))
		return 0;

	ex_sb_info->small_read_page = squashfs_cache_init("ex_small_data",
		msblk->thread_ops->max_decompressors(), ex_sb_info->small_block_size);
	if (!ex_sb_info->small_read_page) {
		ERROR("Failed to init ex_small_data cache\n");
		return -ENOMEM;
	}

	return 0;
}

/*
 * Validate the chunk-related segment layout (chunk_table < chunks <
 * blocks), derive chunks_num and chunk_table_size from the segment
 * boundaries, and load the chunk table.
 *
 * Returns 0 or a negative errno; a loaded chunk_table is released by
 * squashfs_ex_put_super() on later failure.
 */
static int fill_super_chunks(struct super_block *sb,
			     const struct ex_super_block *ex_sblk,
			     struct ex_super_block_info *ex_sb_info)
{
	struct squashfs_sb_info *msblk = sb->s_fs_info;
	u64 chunks_length;

	ex_sb_info->chunks_start = le64_to_cpu(ex_sblk->chunks_start);
	if (ex_sb_info->chunks_start >= msblk->bytes_used ||
	    ex_sb_info->blocks_start <= ex_sb_info->chunks_start) {
		ERROR("ex-squashfs image layout malformed: block before chunks");
		return -EINVAL;
	}
	/* chunk count is implied by the segment length, not stored explicitly */
	chunks_length = ex_sb_info->blocks_start - ex_sb_info->chunks_start;
	ex_sb_info->chunks_num = chunks_length / sizeof(struct chunk);

	ex_sb_info->chunk_table_start = le64_to_cpu(ex_sblk->chunk_table_start);
	if (ex_sb_info->chunk_table_start >= msblk->bytes_used ||
	    ex_sb_info->chunks_start <= ex_sb_info->chunk_table_start) {
		ERROR("ex-squashfs image layout malformed: chunk table before chunks");
		return -EINVAL;
	}

	ex_sb_info->chunk_table_size = (ex_sb_info->chunks_start -
		ex_sb_info->chunk_table_start) / sizeof(__le32) - 1;
	ex_sb_info->chunk_table = read_chunk_table(sb, ex_sb_info);
	if (IS_ERR(ex_sb_info->chunk_table)) {
		int err = PTR_ERR(ex_sb_info->chunk_table);

		ERROR("failed to read chunk table, error code:%d\n", err);
		ex_sb_info->chunk_table = NULL;
		return err;
	}

	return 0;
}

/*
 * Locate and parse the ex-squashfs super block appended after the
 * regular squashfs image, returning a fully-populated
 * ex_super_block_info (caller owns it) or an ERR_PTR.
 *
 * msblk->bytes_used is temporarily widened to the whole device size so
 * that squashfs_read_table() accepts reads beyond the original image
 * end; it is restored before returning on every path.
 *
 * NOTE(review): SQUASHFS_ALIGN_MASK is passed to round_up() as the
 * alignment value; round_up() expects a power-of-two size, so confirm
 * the macro is a size despite the "_MASK" name.
 */
struct ex_super_block_info *squashfs_ex_fill_super(struct super_block *sb)
{
	struct ex_super_block_info *ex_sb_info = NULL;
	struct ex_super_block *ex_sblk = NULL;
	struct squashfs_sb_info *msblk = NULL;
	u64 ex_sblk_start, origin_bytes_used;
	int err;

	msblk = sb->s_fs_info;
	origin_bytes_used = msblk->bytes_used;
	msblk->bytes_used = i_size_read(sb->s_bdev->bd_inode);
	/* the ex super block starts at the aligned end of the base image */
	ex_sblk_start = round_up(origin_bytes_used, SQUASHFS_ALIGN_MASK);

	ex_sb_info = kzalloc(sizeof(*ex_sb_info), GFP_KERNEL);
	if (ex_sb_info == NULL) {
		ERROR("Failed to zalloc ex_sb_info\n");
		err = -ENOMEM;
		goto read_failed;
	}

	ex_sblk = fill_super_init(sb, ex_sblk_start);
	if (IS_ERR(ex_sblk)) {
		ERROR("failed to read squashfs_ex_super_block\n");
		err = PTR_ERR(ex_sblk);
		ex_sblk = NULL;
		goto read_failed;
	}

	/* fill_super_block_info() and fill_super_caches() rely on flags */
	ex_sb_info->flags = le16_to_cpu(ex_sblk->flags);

	err = fill_super_block_info(sb, ex_sblk_start, ex_sblk, ex_sb_info);
	if (err != 0)
		goto read_failed;

	// Skip cache init & chunk init when there is no block (data) in image.
	if (ex_sb_info->blocks == 0)
		goto out;

	err = fill_super_caches(sb, ex_sblk, ex_sb_info);
	if (err != 0)
		goto read_failed;

	err = fill_super_chunks(sb, ex_sblk, ex_sb_info);
	if (err != 0)
		goto read_failed;

out:
	kfree(ex_sblk);
	msblk->bytes_used = origin_bytes_used;
	return ex_sb_info;

read_failed:
	msblk->bytes_used = origin_bytes_used;
	squashfs_ex_put_super(ex_sb_info);
	kfree(ex_sblk);
	return ERR_PTR(err);
}

/*
 * Copy @req_length bytes of uncompressed data out of the concatenated
 * pages of @bio_list into the page actor, starting @req_offset bytes
 * into the first bio segment.
 *
 * Returns the number of bytes copied, or -ENOMEM when the actor runs
 * out of pages before @req_length bytes were delivered.
 */
static int copy_bio_list_to_actor(struct bio_list *bio_list, struct squashfs_page_actor *actor, int req_offset,
	int req_length)
{
	void *actor_addr = squashfs_first_page(actor);
	int copied = 0;
	int actor_offset = 0;
	struct bio *bio;
	int cur_offset = req_offset;

	bio_list_for_each(bio, bio_list) {
		struct bvec_iter_all iter_all = {};
		struct bio_vec *bvec = bvec_init_iter_all(&iter_all);

		if (WARN_ON_ONCE(!bio_next_segment(bio, &iter_all)))
			continue;

		while (copied < req_length) {
			/* copy at most to the end of the bvec or of the actor page */
			int avail = min_t(int, bvec->bv_len - cur_offset, PAGE_SIZE - actor_offset);

			memcpy(actor_addr + actor_offset, page_address(bvec->bv_page) + bvec->bv_offset + cur_offset, avail);

			actor_offset += avail;
			copied += avail;
			cur_offset += avail;

			/* actor page filled: advance to the next output page */
			if (actor_offset >= PAGE_SIZE) {
				actor_addr = squashfs_next_page(actor);
				if (!actor_addr && copied < req_length)
					goto error;
				actor_offset = 0;
			}

			/* bvec exhausted: advance within this bio, or fall to the next bio */
			if (cur_offset >= bvec->bv_len) {
				cur_offset = 0;
				if (!bio_next_segment(bio, &iter_all))
					break;
			}
		}
	}

	squashfs_finish_page(actor);
	return copied;

error:
	squashfs_finish_page(actor);
	return -ENOMEM;
}

/*
 * Read @length bytes at device offset @index, rounded out to device
 * block boundaries, into freshly allocated pages attached to bios on
 * @bio_list. On success *block_offset is the offset of @index within
 * the first device block.
 *
 * On failure every allocated page and bio is released, including the
 * bio being built when the failure struck — the previous code leaked
 * that in-flight bio (and its pages), since it was only freed once it
 * had been added to @bio_list.
 *
 * Returns 0 or a negative errno.
 */
static int squashfs_ex_bio_read(struct super_block *sb, u64 index, int length, struct bio_list *bio_list,
	int *block_offset)
{
	struct squashfs_sb_info *msblk = sb->s_fs_info;
	const u64 read_start = round_down(index, msblk->devblksize);
	sector_t block = read_start >> msblk->devblksize_log2;
	const u64 read_end = round_up(index + length, msblk->devblksize);
	const sector_t block_end = read_end >> msblk->devblksize_log2;
	int total_len = (block_end - block) << msblk->devblksize_log2;
	int page_count = DIV_ROUND_UP(total_len, PAGE_SIZE);
	int error, i;
	struct bio *bio = NULL;
	unsigned int count;
	unsigned int len;

	while (page_count > 0) {
		count = page_count >= BIO_MAX_PAGES ? BIO_MAX_PAGES : page_count;
		bio = bio_alloc(GFP_NOIO, count);
		if (!bio) {
			error = -ENOMEM;
			goto out_free_bio;
		}

		bio_set_dev(bio, sb->s_bdev);
		bio->bi_opf = REQ_OP_READ;
		bio->bi_iter.bi_sector = block * ((unsigned int)msblk->devblksize >> SECTOR_SHIFT);

		for (i = 0; i < count; ++i) {
			struct page *page = alloc_page(GFP_NOIO);

			len = min_t(unsigned int, PAGE_SIZE, total_len);
			if (!page) {
				error = -ENOMEM;
				goto out_free_bio;
			}
			if (!bio_add_page(bio, page, len, 0)) {
				error = -EIO;
				__free_pages(page, 0);
				goto out_free_bio;
			}
			total_len -= len;
		}

		page_count -= count;
		block += (count * PAGE_SIZE) >> msblk->devblksize_log2;

		error = submit_bio_wait(bio);
		if (error)
			goto out_free_bio;

		bio_list_add(bio_list, bio);
		bio = NULL;	/* now owned by bio_list; cleanup must not free it twice */
	}

	*block_offset = index & ((1U << msblk->devblksize_log2) - 1);
	return 0;

out_free_bio:
	/* free the bio under construction, which was never added to the list */
	if (bio) {
		bio_free_pages(bio);
		bio_put(bio);
	}
	while ((bio = bio_list_pop(bio_list)) != NULL) {
		bio_free_pages(bio);
		bio_put(bio);
	}
	return error;
}

/*
 * Read one ex-squashfs data block at device offset @index into
 * @output, decompressing it when @block_info says it is compressed.
 *
 * @next_index: if non-NULL, set to the offset just past this block.
 *
 * Returns the number of bytes produced (from the decompressor or the
 * raw copy) or a negative errno.
 */
int read_datablock(struct super_block *sb, u64 index, u32 block_info, u64 *next_index,
	struct squashfs_page_actor *output)
{
	struct squashfs_sb_info *msblk = sb->s_fs_info;
	struct bio_list bio_list;
	struct bio *bio = NULL;
	int compressed;
	int res;
	int offset;
	u32 length;

	compressed = ex_block_compressed(block_info);
	length = ex_block_length(block_info);
	TRACE("Block @ 0x%llx, %scompressed size %d, src size %d\n", index, compressed ? "" : "un", length, output->length);

	if (next_index)
		*next_index = index + length;

	bio_list_init(&bio_list);
	res = squashfs_ex_bio_read(sb, index, length, &bio_list, &offset);
	if (res)
		goto out;

	if (compressed) {
		/* no decompressor stream configured: cannot proceed */
		if (!msblk->stream) {
			res = -EIO;
			goto out_free_bio;
		}

		/* stash the full list so the decompressor can walk all bios
		 * from the first one */
		bio = bio_list.head;
		bio->bi_private = &bio_list;
		res = msblk->thread_ops->decompress(msblk, bio, offset, length, output);
	} else {
		res = copy_bio_list_to_actor(&bio_list, output, offset, length);
	}

out_free_bio:
	while ((bio = bio_list_pop(&bio_list)) != NULL) {
		bio_free_pages(bio);
		bio_put(bio);
	}
out:
	if (res < 0)
		ERROR("Failed to read block 0x%llx: %d\n", index, res);

	return res;
}

#if !defined(CONFIG_SQUAHSFS_FILE_DIRECT) || defined(CONFIG_RTOS_EXTEND_SQUASHFS_FORCE_FILE_CACHE)

/*
 * Resize the "ex_data" read cache to g_cache_ctx.block_num entries if
 * a change was requested, the size actually differs, and the new total
 * would not exceed the image size.
 *
 * NOTE(review): squashfs_cache_init() / squashfs_cache_delete() are
 * called while holding read_page_lock (a spinlock) and g_cache_ctx.lock;
 * if they allocate with GFP_KERNEL or otherwise sleep, this can deadlock
 * or trigger a scheduling-while-atomic bug — confirm their context
 * requirements and consider allocating the new cache before locking.
 */
void check_read_page_cache(struct ex_super_block_info *ex_sb_info, struct squashfs_sb_info *msblk)
{
	struct squashfs_cache *new_cache = NULL;

	read_lock(&g_cache_ctx.lock);
	if (!g_cache_ctx.changed || ex_sb_info->read_page->entries == g_cache_ctx.block_num ||
		(g_cache_ctx.block_num * ex_sb_info->read_page->block_size) > msblk->bytes_used) {
		read_unlock(&g_cache_ctx.lock);
		return;
	}

	/* read_page_lock serializes the swap against concurrent resizers */
	spin_lock(&ex_sb_info->read_page_lock);
	new_cache = squashfs_cache_init("ex_data", g_cache_ctx.block_num, ex_sb_info->read_page->block_size);
	if (new_cache == NULL) {
		ERROR("Failed to change read_page_cache\n");
		spin_unlock(&ex_sb_info->read_page_lock);
		read_unlock(&g_cache_ctx.lock);
		return;
	}

	squashfs_cache_delete(ex_sb_info->read_page);
	ex_sb_info->read_page = new_cache;

	TRACE("Change read_page_cache size:%u, block_num:%u\n",
		g_cache_ctx.block_num * ex_sb_info->read_page->block_size,
		g_cache_ctx.block_num);
	spin_unlock(&ex_sb_info->read_page_lock);
	read_unlock(&g_cache_ctx.lock);
}

/*
 * Copy data for every reader on @reader_list whose source is
 * @block_id from the cached block @buffer into the page cache,
 * erasing each satisfied reader from the list.
 *
 * @page is the page originally requested by ->readpage and is already
 * locked by the caller; any OTHER page is grabbed (locked) via
 * grab_cache_page_nowait() and released here. A page is unlocked and
 * marked up-to-date only once page_reader_done() says no further
 * reader from this block writes into it.
 */
static void write_pages(struct page *page, struct squashfs_cache_entry *buffer,
	struct list_head *reader_list, u32 block_id)
{
	struct page_reader *reader = NULL;
	void *page_addr = NULL;
	int copied;
	struct page *target_page = NULL;
	bool next_page = true;

	reader = list_first_entry_or_null(reader_list, struct page_reader, node_handle);
	while (reader) {
		/* skip readers fed by other blocks; they wait for a later pass */
		if (reader->block_id != block_id) {
			if (list_is_last(&reader->node_handle, reader_list))
				break;
			reader = list_next_entry(reader, node_handle);
			continue;
		}

		if (next_page) {
			target_page = (reader->page_index == page->index) ?
				page : grab_cache_page_nowait(page->mapping, reader->page_index);
			if (!target_page) {
				/* page unavailable right now: drop the reader and move on */
				reader = page_reader_list_erase(reader_list, reader);
				continue;
			}

			page_addr = kmap_atomic(target_page);
			next_page = false;
			/* zero-fill so gaps between chunks read back as zeros */
			if (!PageUptodate(target_page))
				memset(page_addr, 0, PAGE_SIZE);
		}

		copied = squashfs_copy_data(page_addr + reader->page_offset, buffer, reader->block_offset, reader->read_size);

		if (page_reader_done(reader_list, reader)) {
			kunmap_atomic(page_addr);
			flush_dcache_page(target_page);
			if (copied == reader->read_size)
				SetPageUptodate(target_page);
			else
				SetPageError(target_page);

			unlock_page(target_page);

			/* only extra pages were grabbed here; the caller holds @page */
			if (reader->page_index != page->index)
				put_page(target_page);

			next_page = true;
		}

		reader = page_reader_list_erase(reader_list, reader);
	}
}

/*
 * Satisfy every reader on @reader_list out of the block caches,
 * consuming the list as blocks are processed.
 *
 * Each pass fetches the block referenced by the first remaining reader
 * through the regular or small-block cache, then write_pages() copies
 * it into every page backed by that block.
 *
 * Returns 0 on success (including an empty list — the previous code
 * returned an uninitialized value in that case) or a negative errno
 * from the cache layer.
 */
static int readpage_from_block(struct page *page, struct list_head *reader_list)
{
	struct inode *i = page->mapping->host;
	struct squashfs_sb_info *msblk = i->i_sb->s_fs_info;
	struct ex_super_block_info *ex_sb_info = msblk->ex_sb_info;
	struct squashfs_cache_entry *buffer = NULL;
	struct page_reader *reader = NULL;
	int res = 0;

	while (!list_empty(reader_list)) {
		reader = list_first_entry(reader_list, struct page_reader, node_handle);
		if (ex_block_small(reader->block_info)) {
			buffer = squashfs_cache_get(i->i_sb, ex_sb_info->small_read_page, reader->block_addr, reader->block_info);
		} else {
			/* the big-block cache may be resized on demand */
			check_read_page_cache(ex_sb_info, msblk);
			buffer = squashfs_cache_get(i->i_sb, ex_sb_info->read_page, reader->block_addr, reader->block_info);
		}

		res = buffer->error;
		if (res < 0) {
			ERROR("Unable to read page, block %llx, info %x\n",
			      reader->block_addr,
			      reader->block_info);
			squashfs_cache_put(buffer);
			return res;
		}

		write_pages(page, buffer, reader_list, reader->block_id);
		squashfs_cache_put(buffer);
	}

	return res;
}

#else
/* Free the first @buffer_num page buffers of @buffer_array, then the array. */
static void free_buffer_array(void **buffer_array, int buffer_num)
{
	void **slot;

	for (slot = buffer_array; slot < buffer_array + buffer_num; ++slot) {
		kfree(*slot);
		*slot = NULL;
	}
	kfree(buffer_array);
}

static int get_buffer_num(struct super_block *sb)
{
	struct squashfs_sb_info *msblk = sb->s_fs_info;
	/* the caller will make sure there is no int overflow */
	if (msblk->ex_sb_info->block_size > PAGE_SIZE)
		return msblk->ex_sb_info->block_size / PAGE_SIZE;
	return 1;
}

/*
 * Allocate @buffer_num PAGE_SIZE buffers behind a pointer array.
 * Returns the array, or NULL with everything released on failure.
 */
static void **alloc_buffer_array(int buffer_num)
{
	void **buffers = kmalloc_array(buffer_num, sizeof(void *), GFP_KERNEL);
	int filled;

	if (buffers == NULL)
		return NULL;

	for (filled = 0; filled < buffer_num; ++filled) {
		buffers[filled] = kmalloc(PAGE_SIZE, GFP_KERNEL);
		if (buffers[filled] == NULL) {
			/* roll back only what was actually allocated */
			free_buffer_array(buffers, filled);
			return NULL;
		}
	}

	return buffers;
}

/*
 * Copy @reader->read_size bytes from the decompressed block (held in
 * @buffer_array as consecutive PAGE_SIZE buffers) into @pageaddr at
 * the reader's page offset. The span may straddle two buffers.
 */
static void do_fill_page(void **buffer_array, void *pageaddr, struct page_reader *reader)
{
	void *dst = pageaddr + reader->page_offset;
	int src_idx = reader->block_offset / PAGE_SIZE;
	u32 src_off = reader->block_offset % PAGE_SIZE;
	u32 first_part = PAGE_SIZE - src_off;

	if (reader->read_size <= first_part) {
		/* entire span lives inside one buffer */
		memcpy(dst, buffer_array[src_idx] + src_off, reader->read_size);
		return;
	}

	/* span crosses into the next buffer */
	memcpy(dst, buffer_array[src_idx] + src_off, first_part);
	memcpy(dst + first_part, buffer_array[src_idx + 1], reader->read_size - first_part);
}

/*
 * Copy data for every reader on @reader_list whose source is
 * @block_id out of the decompressed @buffer_array into the page cache,
 * erasing each satisfied reader.
 *
 * @page is the ->readpage target and is already locked by the caller;
 * other pages are grabbed via grab_cache_page_nowait() and released
 * here once page_reader_done() reports the page complete.
 */
static void fill_pages(struct page *page, void **buffer_array, u32 block_id, struct list_head *reader_list)
{
	void *page_addr = NULL;
	struct page *target_page = NULL;
	struct page_reader *reader = NULL;
	bool next_page = true;

	reader = list_first_entry_or_null(reader_list, struct page_reader, node_handle);
	while (reader) {
		/* skip readers fed by other blocks; they wait for a later pass */
		if (reader->block_id != block_id) {
			if (list_is_last(&reader->node_handle, reader_list))
				break;
			reader = list_next_entry(reader, node_handle);
			continue;
		}

		if (next_page) {
			target_page = (reader->page_index == page->index) ?
				page : grab_cache_page_nowait(page->mapping, reader->page_index);
			if (!target_page) {
				/* page unavailable right now: drop the reader and move on */
				reader = page_reader_list_erase(reader_list, reader);
				continue;
			}

			page_addr = kmap_atomic(target_page);
			next_page = false;
			/* zero-fill so gaps between chunks read back as zeros */
			if (!PageUptodate(target_page))
				memset(page_addr, 0, PAGE_SIZE);
		}

		do_fill_page(buffer_array, page_addr, reader);

		if (page_reader_done(reader_list, reader)) {
			kunmap_atomic(page_addr);
			flush_dcache_page(target_page);
			SetPageUptodate(target_page);

			unlock_page(target_page);
			/* only extra pages were grabbed here; the caller holds @page */
			if (reader->page_index != page->index)
				put_page(target_page);

			next_page = true;
		}

		reader = page_reader_list_erase(reader_list, reader);
	}
}

/* Zero the unused tail of the final PAGE_SIZE buffer after @length bytes. */
static void padding_last_buffer(int length, void *last_buffer_addr)
{
	int used = length % PAGE_SIZE;

	if (used > 0)
		memset(last_buffer_addr + used, 0, PAGE_SIZE - used);
}

/*
 * Decompress (or raw-copy) one data block into @buffer_array via a
 * temporary page actor.
 *
 * Returns 0 on success or the negative errno from read_datablock()
 * — the previous code collapsed every failure into -ENOMEM and also
 * ran the tail padding on a negative length.
 */
static int decompress_block_data(u64 block_addr, u32 block_info, struct super_block *sb, void **buffer_array,
	int buffer_num)
{
	int length;
	struct squashfs_page_actor *actor = squashfs_page_actor_init(buffer_array, buffer_num, 0);

	if (actor == NULL)
		return -ENOMEM;

	length = read_datablock(sb, block_addr, block_info, NULL, actor);
	kfree(actor);
	if (length < 0)
		return length;

	/* Fill the remaining part of the last page by 0 */
	padding_last_buffer(length, buffer_array[buffer_num - 1]);
	return 0;
}

/*
 * Satisfy every reader on @reader_list by decompressing whole blocks
 * into a temporary buffer array (no cache), consuming the list as it
 * goes.
 *
 * Returns 0 on success or a negative errno.
 */
static int readpage_from_block(struct page *page, struct list_head *reader_list)
{
	/* the caller will make sure this pointer is not NULL */
	struct super_block *sb = page->mapping->host->i_sb;
	struct page_reader *reader = NULL;
	int res;
	int buffer_num = get_buffer_num(sb);
	void **buffer_array = alloc_buffer_array(buffer_num);
	if (buffer_array == NULL)
		return -ENOMEM;

	while (!list_empty(reader_list)) {
		reader = list_first_entry(reader_list, struct page_reader, node_handle);

		/* decompress the whole block the first reader needs ... */
		res = decompress_block_data(reader->block_addr, reader->block_info, sb, buffer_array, buffer_num);
		if (res != 0) {
			free_buffer_array(buffer_array, buffer_num);
			return res;
		}

		/* ... then satisfy (and erase) every reader fed by it */
		fill_pages(page, buffer_array, reader->block_id, reader_list);
	}

	free_buffer_array(buffer_array, buffer_num);
	return 0;
}
#endif

/*
 * Convert and validate the raw on-disk chunk descriptors of one inode.
 *
 * @ex_sb_info: ext-squashfs super block info used for bounds checks.
 * @raw_chunks: little-endian chunk records read from the device.
 * @count:      number of records in @raw_chunks (callers pass count >= 1).
 * @inode_size: file size recorded in the inode; the summed chunk sizes
 *              must match it exactly.
 *
 * Returns a kmalloc'ed array of @count struct chunk (ownership passes to
 * the caller) or an ERR_PTR on allocation failure / image corruption.
 */
static struct chunk *parse_raw_chunks(const struct ex_super_block_info *ex_sb_info,
				      const struct raw_chunk *raw_chunks,
				      u32 count,
				      loff_t inode_size)
{
	u32 i;
	int err;
	struct chunk *chunks = NULL;
	loff_t total_size = 0;

	/* kmalloc_array() rejects count * size multiplication overflow */
	chunks = kmalloc_array(count, sizeof(struct chunk), GFP_KERNEL);
	if (!chunks) {
		err = -ENOMEM;
		goto bad;
	}

	for (i = 0; i < count; i++) {
		u32 block_info;

		chunks[i].block = le32_to_cpu(raw_chunks[i].block);
		chunks[i].size = le32_to_cpu(raw_chunks[i].size);
		chunks[i].offset = le32_to_cpu(raw_chunks[i].offset);

		// verify chunk content integrity
		if (chunks[i].block >= ex_sb_info->blocks) {
			ERROR("Block id out of range.\n");
			err = -EIO;
			goto free_chunks;
		}

		block_info = ex_sb_info->block_info_list[chunks[i].block];

		/* u64 sums below avoid offset + size wrapping in 32 bits */
		if (!ex_block_small(block_info) &&
		    (u64)chunks[i].offset + (u64)chunks[i].size > ex_sb_info->block_size) {
			ERROR("Chunk block size out of range.\n");
			err = -EIO;
			goto free_chunks;
		}

		if (ex_block_small(block_info) &&
		    (u64)chunks[i].offset + (u64)chunks[i].size > ex_sb_info->small_block_size) {
			ERROR("Chunk small block size out of range.\n");
			err = -EIO;
			goto free_chunks;
		}
		total_size += chunks[i].size;
	}

	if (total_size != inode_size) {
		ERROR("Inode size and chunk size mismatched.\n");
		err = -EIO;
		goto free_chunks;
	}

	return chunks;

free_chunks:
	kfree(chunks);
bad:
	ERROR("Failed to parse chunk info, errno %d.\n", err);
	return ERR_PTR(err);
}

/*
 * Look up and validate the chunk list for one inode.
 *
 * @sb:                super block of the mounted image.
 * @chunk_table_index: index of the inode's entry in the chunk table.
 * @inode_size:        file size recorded in the inode, cross-checked
 *                     against the summed chunk sizes.
 *
 * Returns a kmalloc'ed struct chunk_info (the caller owns it and its
 * ->chunks array) or an ERR_PTR on failure.
 */
struct chunk_info *squashfs_ex_get_chunk_info(struct super_block *sb,
					      u32 chunk_table_index,
					      loff_t inode_size)
{
	struct squashfs_sb_info *msblk = sb->s_fs_info;
	struct ex_super_block_info *ex_sb_info = msblk->ex_sb_info;
	u32 chunk_begin;
	u32 chunk_end;
	u64 chunk_start_addr;
	u32 length;
	struct raw_chunk *raw_chunks = NULL;
	struct chunk_info *chunk_info = NULL;

	if (!ex_sb_info) {
		ERROR("Not a ex-squashfs image.\n");
		return ERR_PTR(-EIO);
	}

	if (chunk_table_index >= ex_sb_info->chunk_table_size)
		return ERR_PTR(-EIO);

	chunk_begin = ex_sb_info->chunk_table[chunk_table_index];
	chunk_end = (chunk_table_index == ex_sb_info->chunk_table_size - 1) ?
		ex_sb_info->chunks_num : ex_sb_info->chunk_table[chunk_table_index + 1];

	/*
	 * Guard against a corrupted chunk table: an inverted range would
	 * underflow the length computation below, an oversized end would
	 * read past the chunk area.
	 */
	if (chunk_begin > chunk_end || chunk_end > ex_sb_info->chunks_num) {
		ERROR("Corrupted chunk table entry.\n");
		return ERR_PTR(-EIO);
	}

	chunk_info = kmalloc(sizeof(*chunk_info), GFP_KERNEL);
	if (chunk_info == NULL)
		return ERR_PTR(-ENOMEM);

	if (chunk_begin == chunk_end) {
		/* An empty chunk list is only legal for a zero-length file */
		if (inode_size != 0) {
			ERROR("No chunk found for file with valid length.\n");
			kfree(chunk_info);
			return ERR_PTR(-EIO);
		}
		chunk_info->chunks = NULL;
		chunk_info->count = 0;
		return chunk_info;
	}

	/* force 64-bit arithmetic: the byte offset may exceed 32 bits */
	chunk_start_addr = ex_sb_info->chunks_start +
		(u64)chunk_begin * sizeof(struct chunk);
	length = (chunk_end - chunk_begin) * sizeof(struct chunk);

	raw_chunks = squashfs_read_table(sb, chunk_start_addr, length);
	if (IS_ERR(raw_chunks)) {
		kfree(chunk_info);
		return ERR_CAST(raw_chunks);
	}

	chunk_info->count = chunk_end - chunk_begin;
	chunk_info->chunks = parse_raw_chunks(ex_sb_info, raw_chunks,
		chunk_info->count, inode_size);
	if (IS_ERR(chunk_info->chunks)) {
		struct chunk_info *to_free = chunk_info;

		/* propagate the parse error, free only the container */
		chunk_info = ERR_CAST(to_free->chunks);
		kfree(to_free);
	}
	kfree(raw_chunks);
	return chunk_info;
}

/*
 * ->readpage implementation for ext-squashfs regular files.
 *
 * Builds the list of (block, range) readers covering @page, decompresses
 * the referenced blocks and copies the decoded data into the target pages.
 * On the success path the page is finished (flushed, marked uptodate and
 * unlocked) inside fill_pages(); on the error/empty paths it is finished
 * here.  Always returns 0 — failure is reported via PageError only.
 */
int squashfs_ex_readpage(struct file *file, struct page *page)
{
	int res;
	struct inode *inode = page->mapping->host;
	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
	struct ex_super_block_info *ex_sb_info = msblk->ex_sb_info;
	struct chunk_info *chunk_info = squashfs_i(inode)->chunks_info;
	loff_t size = i_size_read(inode);
	struct list_head *reader_list = generate_page_reader_list(ex_sb_info, chunk_info, page->index, size);

	if (!reader_list)
		goto error;

	/* No reader covers this page: finish it via the common path below. */
	if (page_reader_list_size(reader_list) == 0)
		goto out;

	res = readpage_from_block(page, reader_list);
	if (res)
		goto error;
	/* Success: fill_pages() already marked the page uptodate and unlocked it. */
	page_reader_list_free(reader_list);
	return 0;

error:
	SetPageError(page);
out:
	/*
	 * NOTE(review): reader_list may be NULL here (allocation-failure
	 * path above) — assumes page_reader_list_free() tolerates NULL;
	 * confirm against its definition.  Also assumes a failing
	 * readpage_from_block() leaves @page locked and not yet finished
	 * by fill_pages() — verify the partial-failure ordering.
	 */
	page_reader_list_free(reader_list);
	flush_dcache_page(page);
	if (!PageError(page))
		SetPageUptodate(page);
	unlock_page(page);
	return 0;
}

/*
 * seq_file show handler for /proc/squashfs_ex/cache_block_num.
 *
 * Snapshot block_num under the read lock so the read pairs with the
 * write_lock taken in cache_size_write(); the original read the shared
 * field without any locking.
 */
static int cache_size_show(struct seq_file *seq, void *offset)
{
	unsigned int block_num;

	read_lock(&g_cache_ctx.lock);
	block_num = g_cache_ctx.block_num;
	read_unlock(&g_cache_ctx.lock);

	seq_printf(seq, "cache block num:%u \n", block_num);
	return 0;
}

/* Open handler: bind the seq_file single-show to cache_size_show(). */
static int cache_size_open(struct inode *inode, struct file *file)
{
	return single_open(file, cache_size_show, NULL);
}

/*
 * Write handler for /proc/squashfs_ex/cache_block_num.
 *
 * Accepts an unsigned integer (base auto-detected by kstrtouint) in the
 * range (0, max_block_num] from a CAP_SYS_ADMIN caller and publishes it
 * under the context write lock, flagging the change for the cache code.
 * Returns the consumed length on success or a negative errno.
 */
static ssize_t cache_size_write(struct file *file, const char __user *buf, size_t len, loff_t *ppos)
{
	char input[BUFFER_SIZE] = {0};
	unsigned int value;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* keep one byte free for the NUL terminator */
	if (len >= BUFFER_SIZE)
		return -EINVAL;

	if (copy_from_user(input, buf, len) != 0)
		return -EFAULT;

	err = kstrtouint(input, 0, &value);
	if (err != 0)
		return err;

	if (value == 0 || value > g_cache_ctx.max_block_num)
		return -EINVAL;

	write_lock(&g_cache_ctx.lock);
	g_cache_ctx.changed = true;
	g_cache_ctx.block_num = value;
	write_unlock(&g_cache_ctx.lock);

	return len;
}

/* proc_ops for /proc/squashfs_ex/cache_block_num (seq_file based). */
static const struct proc_ops ex_cache_size_ops = {
	.proc_flags = PROC_ENTRY_PERMANENT,	/* entry lives for the module's lifetime */
	.proc_open = cache_size_open,
	.proc_write = cache_size_write,
	.proc_read = seq_read,			/* standard single_open read path */
	.proc_lseek = seq_lseek,
	.proc_release = single_release,
};

/*
 * Module init: reset the global cache context and create the
 * /proc/squashfs_ex/cache_block_num control file.
 *
 * Returns 0 on success or -ENOMEM if either proc entry cannot be created.
 */
int __init squashfs_ex_init(void)
{
	/* caching starts disabled; the proc file raises block_num later */
	g_cache_ctx.changed = false;
	g_cache_ctx.block_num = 0;
	g_cache_ctx.max_block_num = 64;
	rwlock_init(&g_cache_ctx.lock);

	g_proc_ctx.parent = proc_mkdir("squashfs_ex", NULL);
	if (!g_proc_ctx.parent)
		goto no_dir;

	g_proc_ctx.file = proc_create("cache_block_num", (mode_t)0640,
				      g_proc_ctx.parent, &ex_cache_size_ops);
	if (!g_proc_ctx.file)
		goto no_file;

	return 0;

no_file:
	ERROR("proc_create squashfs_ex/cache_block_num failed.\n");
	remove_proc_entry("squashfs_ex", NULL);
	return -ENOMEM;
no_dir:
	ERROR("proc_create squashfs_ex failed.\n");
	return -ENOMEM;
}

/*
 * Module exit: tear down the proc interface.
 *
 * Use proc_remove() instead of name-based remove_proc_entry(): it is
 * NULL-safe, so this is harmless even if squashfs_ex_init() failed part
 * way (the original would pass a NULL parent and look up the name under
 * /proc root instead).
 */
void squashfs_ex_destroy(void)
{
	proc_remove(g_proc_ctx.file);
	proc_remove(g_proc_ctx.parent);
	g_proc_ctx.parent = NULL;
	g_proc_ctx.file = NULL;
}
