#include "oppool.h"

extern struct crypto_ahash *hash_tfm;
extern struct crypto_skcipher *skcipher;

/*
 * Return @op to @pool's free list and wake any thread waiting for a
 * free operation. Uses irqsave locking, so it is safe to call from the
 * crypto completion callbacks.
 */
void put_async_op2pool(struct async_op_pool *pool, struct async_op *op)
{
	unsigned long flags;
	bool released;

	spin_lock_irqsave(&pool->lock, flags);
	released = op->in_use;
	if (released) {
		op->in_use = false;
		list_add_tail(&op->list, &pool->free_list);
		pool->free_count++;
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	/* Wake outside the lock to keep the critical section short. */
	if (released)
		wake_up_interruptible(&pool->wait_queue);
}

// 哈希操作完成回调
/*
 * Completion callback for async hash requests.
 *
 * -EINPROGRESS only signals that a backlogged request has been moved to
 * the hardware queue; the request is not finished, so we must not
 * complete or recycle the op yet. On real completion, record the result,
 * bump the executed-ops counter, signal the waiter, and return the op to
 * its pool.
 *
 * Fixes vs original: declarations hoisted to the top of the function
 * (kernel builds warn on declaration-after-statement) and logging
 * switched to the pr_*() helpers for consistency with the rest of the
 * file.
 */
static void async_hash_op_complete(void *data, int error)
{
	struct async_op *op = data;
	struct async_op_pool *pool;

	if (!op) {
		pr_err("Invalid async request or data\n");
		return;
	}

	/* Backlog-to-queue transition only; the request is still running. */
	if (error == -EINPROGRESS) {
		pr_info("Get EINPROGRESS, op: %p\n", data);
		return;
	}

	pool = op->pool;

	/* Statistics: one more operation fully executed. */
	atomic64_inc(&pool->executed_ops);

	op->wait.err = error;
	complete(&op->wait.completion);
	put_async_op2pool(pool, op);
}

// 加密操作完成回调
/*
 * Completion callback for async skcipher requests.
 *
 * -EINPROGRESS only signals a backlog-to-queue transition and is
 * ignored. On real completion, record the result, bump the executed-ops
 * counter, signal the waiter, and return the op to its pool.
 *
 * Bug fix: the original called pr_err(KERN_ERR "...") — pr_err()
 * already supplies the log level, so the KERN_ERR prefix bytes were
 * embedded verbatim into the message. Dead commented-out code removed
 * and declarations hoisted to the top of the function.
 */
void async_skcipher_op_complete(void *data, int error)
{
	struct async_op *op = data;
	struct async_op_pool *pool;

	if (!op) {
		pr_err("Invalid async request or data\n");
		return;
	}

	/* Backlog-to-queue transition only; the request is still running. */
	if (error == -EINPROGRESS)
		return;

	pool = op->pool;

	/* Statistics: one more operation fully executed. */
	atomic64_inc(&pool->executed_ops);

	op->wait.err = error;
	complete(&op->wait.completion);
	put_async_op2pool(pool, op);
}

int init_async_op_pool(struct thread_data *tdata)
{
	struct async_op_pool *pool = NULL;
	int i, ret = 0;
	if (!tdata || !tdata->params) {
		printk(KERN_ERR "Invalid thread data or params\n");
		return -EINVAL;
	}

	pool = kmalloc(sizeof(struct async_op_pool), GFP_KERNEL);
	if (!pool) {
		printk(KERN_ERR "Failed to allocate async op pool\n");
		return -ENOMEM;
	}

	tdata->op_pool = pool;

	spin_lock_init(&pool->lock);
	init_waitqueue_head(&pool->wait_queue);
	INIT_LIST_HEAD(&pool->free_list);
	pool->free_count = 0;
	pool->total_count = 0;
	atomic64_set(&pool->executed_ops, 0);

	for (i = 0; i < ASYNC_POOL_SIZE; i++) {
		struct async_op *op = kzalloc(sizeof(struct async_op), GFP_KERNEL);
		if (!op) {
			ret = -ENOMEM;
			goto error;
		}

		op->is_hash = tdata->params->is_hash;
		op->in_use = false;
		INIT_LIST_HEAD(&op->list);

		// 分配源和目标缓冲区
		op->src_buf = kmalloc(tdata->params->data_size, GFP_KERNEL);
		if (!op->src_buf) {
			kfree(op);
			ret = -ENOMEM;
			goto error;
		}
		sg_init_one(&op->sg_src, op->src_buf, tdata->params->data_size);

		get_random_bytes(op->src_buf, tdata->params->data_size);
		op->dst_buf = kmalloc(tdata->params->is_hash ? 256 : tdata->params->data_size,
				      GFP_KERNEL);
		if (!op->dst_buf) {
			kfree(op->src_buf);
			kfree(op);
			ret = -ENOMEM;
			goto error;
		}
		sg_init_one(&op->sg_dst, op->dst_buf,
			    tdata->params->is_hash ? 256 : tdata->params->data_size);

		if (tdata->params->is_hash) {
			op->ahreq = ahash_request_alloc(hash_tfm, GFP_KERNEL);
			if (!op->ahreq) {
				kfree(op);
				ret = -ENOMEM;
				goto error;
			}
			ahash_request_set_callback(op->ahreq, CRYPTO_TFM_REQ_MAY_BACKLOG,
						   async_hash_op_complete, op);
		} else {
			op->skreq = skcipher_request_alloc(skcipher, GFP_KERNEL);
			if (!op->skreq) {
				kfree(op);
				ret = -ENOMEM;
				goto error;
			}
			skcipher_request_set_callback(op->skreq, CRYPTO_TFM_REQ_MAY_BACKLOG,
						      async_skcipher_op_complete, op);
		}

		pool->ops[i] = op;
		op->pool = pool;

		// 添加到空闲链表
		spin_lock(&pool->lock);
		list_add_tail(&op->list, &pool->free_list);
		pool->free_count++;
		pool->total_count++;
		spin_unlock(&pool->lock);
	}

	pr_info("Thread %d: async op pool initialized with %d operations\n", tdata->thread_id,
		ASYNC_POOL_SIZE);

	return 0;

error:
	// 清理已分配的操作
	cleanup_async_op_pool(tdata);
	return ret;
}

/* Pop the first free op under the pool lock; NULL if the list is empty. */
static struct async_op *try_take_free_op(struct async_op_pool *pool)
{
	struct async_op *op = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	if (!list_empty(&pool->free_list)) {
		op = list_first_entry(&pool->free_list, struct async_op, list);
		list_del_init(&op->list);
		op->in_use = true;
		pool->free_count--;
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	return op;
}

/*
 * Acquire one free operation from the thread's pool, waiting up to
 * @timeout_ms milliseconds for one to become available if the pool is
 * currently empty. Returns NULL when nothing could be acquired
 * (timeout, interruption, or timeout_ms <= 0).
 */
struct async_op *get_async_op(struct thread_data *tdata, int timeout_ms)
{
	struct async_op_pool *pool = tdata->op_pool;
	struct async_op *op;
	long rc;

	/* Fast path: an op is already free. */
	op = try_take_free_op(pool);
	if (op)
		return op;

	if (timeout_ms <= 0)
		return NULL;

	/*
	 * The unlocked list_empty() in the wait condition is only a hint;
	 * the actual acquisition re-checks under the lock.
	 */
	rc = wait_event_interruptible_timeout(pool->wait_queue,
					      !list_empty(&pool->free_list),
					      msecs_to_jiffies(timeout_ms));
	if (rc > 0)
		op = try_take_free_op(pool);

	return op;
}

/*
 * Detach up to @want ops from the free list into @ops and keep
 * free_count in sync. Caller must hold pool->lock. Returns the number
 * of ops taken.
 */
static int take_ops_locked(struct async_op_pool *pool, struct async_op **ops,
			   int want)
{
	struct async_op *op, *tmp;
	int taken = 0;

	list_for_each_entry_safe(op, tmp, &pool->free_list, list) {
		if (taken >= want)
			break;

		list_del_init(&op->list);
		op->in_use = true;
		ops[taken++] = op;
	}
	pool->free_count -= taken;

	return taken;
}

/*
 * Acquire up to @count free operations into @ops. If the pool does not
 * currently hold @count free ops, wait up to @timeout_ms milliseconds
 * for more to be released. Returns the number actually acquired, which
 * may be less than @count on timeout or interruption.
 *
 * Bug fix vs original: the slow path computed its free_count adjustment
 * as (count - acquired - remaining), which always evaluates to zero
 * after the take loop, so free_count was never decremented there and
 * drifted permanently out of sync with the list. All accounting now
 * goes through take_ops_locked().
 */
int get_async_ops_batch(struct thread_data *tdata, struct async_op **ops, int count, int timeout_ms)
{
	struct async_op_pool *pool = tdata->op_pool;
	unsigned long flags;
	int acquired = 0;
	long wait_ret;

	/* Fast path: take the whole batch if the pool has enough right now. */
	spin_lock_irqsave(&pool->lock, flags);
	if (pool->free_count >= count)
		acquired = take_ops_locked(pool, ops, count);
	spin_unlock_irqrestore(&pool->lock, flags);

	if (acquired == count || timeout_ms <= 0)
		return acquired;

	/*
	 * Slow path: wait for the remainder. The unlocked read of
	 * free_count in the condition is only a hint; the actual
	 * acquisition re-checks under the lock.
	 */
	wait_ret = wait_event_interruptible_timeout(pool->wait_queue,
						    pool->free_count >= (count - acquired),
						    msecs_to_jiffies(timeout_ms));
	if (wait_ret > 0) {
		spin_lock_irqsave(&pool->lock, flags);
		acquired += take_ops_locked(pool, ops + acquired,
					    count - acquired);
		spin_unlock_irqrestore(&pool->lock, flags);
	}

	return acquired;
}

/*
 * Return @op to the thread's pool and wake any waiters.
 *
 * The original body was a verbatim copy of put_async_op2pool(); delegate
 * to it so the lock/wake logic lives in exactly one place.
 */
void put_async_op(struct thread_data *tdata, struct async_op *op)
{
	put_async_op2pool(tdata->op_pool, op);
}

/*
 * Return a batch of operations to the pool under a single lock
 * acquisition, waking waiters once if anything was actually released.
 * NULL entries and ops not marked in_use are skipped.
 */
void put_async_ops_batch(struct thread_data *tdata, struct async_op **ops, int count)
{
	struct async_op_pool *pool = tdata->op_pool;
	unsigned long flags;
	int released = 0;
	int i;

	spin_lock_irqsave(&pool->lock, flags);
	for (i = 0; i < count; i++) {
		struct async_op *op = ops[i];

		if (!op || !op->in_use)
			continue;

		op->in_use = false;
		list_add_tail(&op->list, &pool->free_list);
		pool->free_count++;
		released++;
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	/* One wakeup covers the whole batch. */
	if (released)
		wake_up_interruptible(&pool->wait_queue);
}

void cleanup_async_op_pool(struct thread_data *tdata)
{
	struct async_op_pool *pool = tdata->op_pool;
	int i;

	if (!pool)
		return;

	// 清空空闲链表
	INIT_LIST_HEAD(&pool->free_list);

	for (i = 0; i < pool->total_count; i++) {
		if (pool->ops[i]) {
			if (tdata->params->is_hash) {
				ahash_request_free(pool->ops[i]->ahreq);
			} else {
				skcipher_request_free(pool->ops[i]->skreq);
			}
			kfree(pool->ops[i]->src_buf);
			kfree(pool->ops[i]->dst_buf);
			kfree(pool->ops[i]);
		}
	}

	pool->free_count = 0;
	pool->total_count = 0;
	kfree(tdata->op_pool);
	tdata->op_pool = NULL;
}

/*
 * Snapshot the pool's free/total counters into the caller's variables,
 * reading both under the pool lock so they are mutually consistent.
 */
void get_pool_status(struct thread_data *tdata, int *free_count, int *total_count)
{
	struct async_op_pool *pool = tdata->op_pool;
	unsigned long irq_flags;

	spin_lock_irqsave(&pool->lock, irq_flags);
	*free_count = pool->free_count;
	*total_count = pool->total_count;
	spin_unlock_irqrestore(&pool->lock, irq_flags);
}
