# Profile the model layer by layer and collect the information we need.
import torch
import torch.nn as nn
import time
from torchvision import models
import threading
import pickle
from parse_config import Config
import model_input
from get_context_memory import get_nvidia_memory,get_pytorch_memory


# Module-level profiling state shared between the hooks and main().
init_memory = 0       # GPU memory allocated (MB) right before the forward pass; set in main()
context_memory=0      # CUDA context overhead (MB); set in main(), added to per-layer peaks

# Guards concurrent access to layer_nodes from the hook callbacks.
lock = threading.Lock()

# Global list of per-layer profiling records (one LayerNode per pre-hook firing).
layer_nodes = []


class LayerNode:
    """Per-layer profiling record, populated by the forward pre/post hooks."""

    def __init__(self, name):
        self.name = name           # module class name (e.g. 'Conv2d')
        self.input_shape = []      # shapes of the input tensors
        self.output_shape = []     # shapes of the output tensors
        self.input_memory = 0      # total input tensor memory (MB)
        self.time_elapsed = 0      # forward time spent in this layer (s)
        self.peak_memory = 0       # peak GPU memory while this layer ran (MB)
        self.output_memory = 0     # total output tensor memory (MB)

    def __repr__(self):
        # Fix: include output_memory, which was recorded but missing from repr.
        return (f"LayerNode(name={self.name}, input_shape={self.input_shape}, "
                f"output_shape={self.output_shape}, input_memory={self.input_memory:.2f} MB, "
                f"output_memory={self.output_memory:.2f} MB, "
                f"time_elapsed={self.time_elapsed:.6f} s, peak_memory={self.peak_memory:.2f} MB)")


# 计算张量的内存占用
def get_tensor_memory(tensor):
    """Return the memory footprint of *tensor* in bytes; 0 for non-tensors."""
    if not isinstance(tensor, torch.Tensor):
        return 0
    return tensor.element_size() * tensor.numel()


# 定义钩子函数
def forward_pre_hook(module, input):
    """Forward pre-hook: record the layer's input shapes/sizes and start its timer.

    Appends a fresh LayerNode for this layer to the global ``layer_nodes`` list;
    the matching ``forward_hook`` fills in the output/timing/memory fields.
    (The original declared ``global init_memory`` but never assigned it, and its
    else-branch computed an elapsed time that was immediately discarded — both
    removed; the net effect was always just resetting the timer.)
    """
    node = LayerNode(name=module.__class__.__name__)

    # Input tensor shapes and their total memory footprint, converted to MB.
    node.input_shape = [tuple(t.shape) for t in input if isinstance(t, torch.Tensor)]
    node.input_memory = sum(get_tensor_memory(t) for t in input) / 1024 ** 2

    # (Re)start the per-layer timer; forward_hook reads this attribute.
    forward_pre_hook.start_time = time.time()

    # Append under the lock so concurrent hook firings don't interleave.
    with lock:
        layer_nodes.append(node)


def forward_hook(module, input, output):
    """Forward hook: fill in output shapes/sizes, peak memory and elapsed time
    for the LayerNode created by the matching ``forward_pre_hook``.
    """
    # NOTE(review): with hooks on nested modules, inner modules finish before
    # their containers, so layer_nodes[-1] may not be this module's node for
    # container modules — confirm against how the profile is consumed.
    with lock:
        node = layer_nodes[-1]

    # Output shapes and output memory (MB). Handling the single-tensor case
    # directly also fixes a crash: iterating a 0-dim tensor raises TypeError.
    if isinstance(output, torch.Tensor):
        node.output_shape = [tuple(output.shape)]
        node.output_memory = get_tensor_memory(output) / 1024 ** 2
    elif isinstance(output, (list, tuple)):
        node.output_shape = [tuple(t.shape) for t in output if isinstance(t, torch.Tensor)]
        node.output_memory = sum(get_tensor_memory(t) for t in output) / 1024 ** 2

    # Peak GPU memory since the last reset plus the CUDA context overhead.
    # reset_peak_memory_stats moved inside this guard: calling it with no CUDA
    # device available raises, so the original crashed on CPU-only machines.
    if torch.cuda.is_available():
        torch.cuda.synchronize()
        node.peak_memory = torch.cuda.max_memory_allocated() / 1024 ** 2 + context_memory
        torch.cuda.reset_peak_memory_stats()

    # Per-layer wall-clock time, then reset the timer for the next layer.
    node.time_elapsed = time.time() - forward_pre_hook.start_time
    forward_pre_hook.start_time = time.time()

def main():
    """Profile the model layer by layer via forward hooks and pickle the result."""
    global init_memory, context_memory
    config_path='./config.json'
    
    config=Config(config_path)

    # Load the model (still on its default device at this point).
    model = model_input.get_model()
    allocated, reserved = get_pytorch_memory()
    nvidia_mem = get_nvidia_memory() 

    # CUDA context overhead: driver-reported usage minus PyTorch allocations.
    # NOTE(review): assumes both values are in bytes — confirm against
    # get_nvidia_memory / get_pytorch_memory.
    context_memory = (nvidia_mem - allocated)/1024**2

    # Register pre/post forward hooks on every module so each layer is profiled.
    # NOTE(review): named_modules() includes container modules; forward_hook's
    # layer_nodes[-1] lookup may then attach results to the wrong node — verify.
    hooks = []
    for name, module in model.named_modules():
        if isinstance(module, nn.Module):  # register hooks on all modules
            pre_hook = module.register_forward_pre_hook(forward_pre_hook)
            hook = module.register_forward_hook(forward_hook)
            hooks.append((pre_hook, hook))  # keep handles so hooks can be removed later

    # Switch the model to evaluation mode.
    model.eval()

    # Build the profiling input (e.g. a random 3-channel 224x224 image batch).
    input_tensor = model_input.get_input_data()

    # Move model and input to the GPU when one is available.
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    model.to(device)
    input_tensor = input_tensor.to(device)

    # Forward pass under no_grad: the hooks populate layer_nodes as layers run.
    with torch.no_grad():
        # Record the initial GPU memory usage.
        if torch.cuda.is_available():
            torch.cuda.synchronize()
            init_memory = torch.cuda.memory_allocated() / 1024 ** 2

        # Start the end-to-end timer.
        start_time = time.time()

        output = model(input_tensor)

        # Total forward time.
        total_time = time.time() - start_time

        # Record the final GPU memory usage.
        if torch.cuda.is_available():
            torch.cuda.synchronize()
            final_memory = torch.cuda.memory_allocated() / 1024 ** 2

    # Remove all registered hooks.
    for pre_hook, hook in hooks:
        pre_hook.remove()
        hook.remove()

    # Persist layer_nodes for later analysis.
    with open(config.config['profiler_result_save_path'], 'wb') as f:
        pickle.dump(layer_nodes, f)
        
    # with open(config.config['profiler_result_save_path'], 'rb') as f:
    #     loaded_layer_nodes = pickle.load(f)
    # print(loaded_layer_nodes)


if __name__ == '__main__':
    main()