"""
    ================================================================================
                            ------------utf-8--------------
    ================================================================================
@Author: 
    rfdsg
@Create Time: 
    2025/3/6 - 21:21
@Description:
    Helpers for batched GPU inference over many torch models: CUDA-stream
    round-robin execution, one-model-at-a-time GPU residency, and batched
    saving of per-model outputs.
@Attention:
    
"""
import torch
from torch import Tensor, nn
from torch.jit import RecursiveScriptModule


def auto_batch_size(fun_list, data, max_batch=100):
    """Probe a feasible batch size by trial GPU inference.

    Moves ``fun_list[0]`` to CUDA and runs one forward pass on *data*; on a
    CUDA out-of-memory error the candidate batch size is halved and the probe
    retried.  The first candidate whose probe succeeds is returned, capped at
    *max_batch*.

    Args:
        fun_list: sequence of callables/modules; only the first one is probed.
        data: sample input forwarded to the probe model.
        max_batch: upper bound (and starting candidate) for the batch size.

    Returns:
        int: a batch size no larger than ``max_batch``.

    Raises:
        RuntimeError: re-raised for non-OOM CUDA errors, or when OOM persists
            even at batch size 1.
    """
    # Start from the upper bound and halve on OOM.  (The original started at 1,
    # which made the halving branch dead code and always returned 1.)
    batch_size = max_batch
    while True:
        test_model = fun_list[0].cuda()
        try:
            _ = test_model(data)
            return min(batch_size, max_batch)
        except RuntimeError as e:
            if 'CUDA out of memory' not in str(e):
                raise
            if batch_size == 1:
                # Even the smallest batch OOMs: surface the error instead of
                # retrying forever (the original spun here indefinitely).
                raise
            batch_size //= 2
        finally:
            # Release the probe model on both success and failure paths;
            # the original leaked it when the forward pass raised.
            del test_model
            torch.cuda.empty_cache()


def cuda_stream(fun_list, data: Tensor):
    """Run every model in *fun_list* over *data* using round-robin CUDA streams.

    Models are processed in groups of ``batch_size``; within a group, forward
    passes are issued on ``streams_per_batch`` CUDA streams in round-robin
    order, then all streams are synchronized before the group's outputs are
    yielded in input order.

    Args:
        fun_list: list of ``RecursiveScriptModule`` objects, or of wrapper
            objects exposing a ``program`` iterable whose ``nn.Module`` nodes
            are moved to CUDA in place.
        data: input tensor fed to every model (assumed to already be on the
            correct device — confirm with callers).

    Yields:
        list: the outputs of the current group of models, in input order.
    """
    num_models = len(fun_list)
    batch_size = 40          # models per group; tune to available GPU memory
    streams_per_batch = 4    # CUDA streams cycled within each group

    if isinstance(fun_list[0], RecursiveScriptModule):
        models = [fun.cuda() for fun in fun_list]
    else:
        # Move each nn.Module node inside fun.program to CUDA in place while
        # keeping the wrapper object itself in the model list.
        models = []
        for fun in fun_list:
            for node in fun.program:
                if isinstance(node, nn.Module):
                    node.cuda()
            models.append(fun)

    for i in range(0, num_models, batch_size):
        batch_streams = [torch.cuda.Stream() for _ in range(streams_per_batch)]
        batch_models = models[i:i + batch_size]
        # Per-group result slots.  (The original sliced a preallocated
        # `results` list, but a list slice is a copy, so the outer list was
        # never actually populated — it was dead, misleading code.)
        batch_result = [None] * len(batch_models)
        for index, model in enumerate(batch_models):
            with torch.cuda.stream(batch_streams[index % streams_per_batch]):
                with torch.no_grad():
                    batch_result[index] = model(data)

        torch.cuda.synchronize()  # wait for all streams before exposing outputs
        # Drop references to this group's models and release cached memory.
        batch_models.clear()
        del batch_models
        torch.cuda.empty_cache()
        print(f'完成{i}batch')
        yield batch_result  # outputs are in input order within the group


def cuda_stream_last(fun_list, data: Tensor):
    """Run every model in *fun_list* over *data* using round-robin CUDA streams.

    Variant of ``cuda_stream`` that unconditionally calls ``.cuda()`` on every
    entry of *fun_list*.  Models are processed in groups of ``batch_size``;
    within a group, forward passes are issued on ``streams_per_batch`` CUDA
    streams in round-robin order, then synchronized before the group's outputs
    are yielded in input order.

    Args:
        fun_list: sequence of modules exposing ``.cuda()``.
        data: input tensor fed to every model (assumed to already be on the
            correct device — confirm with callers).

    Yields:
        list: the outputs of the current group of models, in input order.
    """
    num_models = len(fun_list)
    batch_size = 40          # models per group; tune to available GPU memory
    streams_per_batch = 4    # CUDA streams cycled within each group
    models = [fun.cuda() for fun in fun_list]

    for i in range(0, num_models, batch_size):
        batch_streams = [torch.cuda.Stream() for _ in range(streams_per_batch)]
        batch_models = models[i:i + batch_size]
        # Per-group result slots.  (The original sliced a preallocated
        # `results` list, but a list slice is a copy, so the outer list was
        # never actually populated — it was dead, misleading code.)
        batch_result = [None] * len(batch_models)
        for index, model in enumerate(batch_models):
            with torch.cuda.stream(batch_streams[index % streams_per_batch]):
                with torch.no_grad():
                    batch_result[index] = model(data)

        torch.cuda.synchronize()  # wait for all streams before exposing outputs
        # Drop references to this group's models and release cached memory.
        batch_models.clear()
        del batch_models
        torch.cuda.empty_cache()
        print(f'完成{i}batch')
        yield batch_result  # outputs are in input order within the group


def for_parallel(fun_lists, data: Tensor):
    """Yield the CPU output of each model in *fun_lists* applied to *data*.

    Models are moved to the GPU one at a time, run under ``no_grad``, and the
    GPU copy is released immediately so only one model occupies GPU memory at
    a time.  The outer grouping only structures the iteration; memory is
    released per model.

    Args:
        fun_lists: sequence of models exposing ``.cuda()``.
        data: input tensor passed to every model.

    Yields:
        Tensor: each model's output moved to the CPU, in input order.
    """
    # NOTE(review): the original also accumulated into `results`, but the
    # accumulation list was never appended to and a generator's `return`
    # value is unreachable to normal callers — that dead code is removed.
    batch_size = 10  # group size for the outer loop; tune to GPU memory
    for start in range(0, len(fun_lists), batch_size):
        for model in fun_lists[start:start + batch_size]:
            model_gpu = model.cuda()  # explicit move to GPU
            with torch.no_grad():
                output = model_gpu(data).cpu()
            yield output
            del model_gpu             # drop the GPU copy of the model
            torch.cuda.empty_cache()  # release cached memory right away


def model_batch(models, data: Tensor):
    """Run every model over *data* and save each output to its own file.

    Models are moved to the GPU one at a time, run under ``no_grad``, and the
    output is saved to ``output_model{idx}.pt`` where ``idx`` is the model's
    global index in *models*.  The GPU copy is released after each model.

    Args:
        models: sequence of models exposing ``.cuda()``.
        data: input tensor passed to every model.
    """
    batch_size = 10  # tune to available GPU memory

    # Iterate over the actual model count.  (The original hard-coded
    # range(0, 1000, ...) regardless of len(models).)
    for start in range(0, len(models), batch_size):
        for offset, model in enumerate(models[start:start + batch_size]):
            idx = start + offset  # global model index
            model_gpu = model.cuda()
            with torch.no_grad():  # consistent with the sibling helpers
                output = model_gpu(data).cpu()
            # Per-model filename.  (The original used the batch start `i`,
            # so every model within a batch overwrote the same file.)
            torch.save(output, f"output_model{idx}.pt")
            del model_gpu
            torch.cuda.empty_cache()
