import torch
import os

# Global gate shared by the hooks below: printing happens only while
# iteration >= 0. It is never modified in this file -- presumably the
# training loop elsewhere bumps or resets it; TODO confirm.
iteration = 0


def print_rank_0(message):
    """Print *message* on a subset of ranks when distributed is active.

    When torch.distributed is initialized, only ranks whose index is a
    multiple of 8 print (presumably one printer per 8-GPU node -- confirm
    that assumption against the deployment); otherwise every caller prints.
    """
    if not torch.distributed.is_initialized():
        print(message, flush=True)
    elif torch.distributed.get_rank() % 8 == 0:
        print(message, flush=True)


def forward_hook(name):
    """Build a forward hook that logs stats for module *name*.

    The returned hook prints every forward input and output: float tensors
    are summarized (dtype, shape, norm, mean, min, max), non-float tensors
    are printed verbatim, and None entries are reported as such. Printing
    is gated on the module-level ``iteration`` counter being >= 0 and
    routed through print_rank_0.
    """
    def print_dtype_hooks(module, inputs, output):
        global iteration
        if 0 <= iteration:
            # Log the layer name and statistics of its inputs.
            if isinstance(inputs, tuple):
                for ind, value in enumerate(inputs):
                    if value is None:
                        print_rank_0(f"inputs, {name}, index={ind}, None")
                    elif isinstance(value, torch.Tensor):
                        if value.dtype not in [torch.float32, torch.float16, torch.bfloat16]:
                            # Non-float tensors (e.g. integer masks) have no
                            # meaningful norm/mean -- dump them verbatim.
                            print_rank_0(f"inputs, {name}, index={ind}, {value}")
                        else:
                            print_rank_0(
                                f"inputs, {name}, index={ind}, DataType:{value.dtype}, "
                                f"Shape:{value.shape}, "
                                f"Norm:{value.norm().item()}, "
                                f"mean:{value.mean()}, min:{torch.min(value)}, "
                                f"max:{torch.max(value)}")
            elif isinstance(inputs, torch.Tensor):
                print_rank_0(
                    f"input0, {name}, DataType:{inputs.dtype}, Shape:{inputs.shape}, "
                    f"Norm:{inputs.norm().item()}, mean:{inputs.mean()}, "
                    f"min:{torch.min(inputs)}, max:{torch.max(inputs)}")

            # Log the layer name and statistics of its outputs.
            if isinstance(output, tuple):
                for ind, value in enumerate(output):
                    if value is None:
                        print_rank_0(f"output, {name}, index={ind}, None")
                    elif isinstance(value, torch.Tensor):
                        if value.dtype not in [torch.float32, torch.float16, torch.bfloat16]:
                            # Fixed label: this line previously said "inputs"
                            # even though it reports an output tensor.
                            print_rank_0(f"output, {name}, index={ind}, {value}")
                        else:
                            print_rank_0(
                                f"output, {name}, index={ind}, DataType:{value.dtype}, "
                                f"Shape:{value.shape}, "
                                f"Norm:{value.norm().item()}, "
                                f"mean:{value.mean()}, "
                                f"min:{torch.min(value)}, max:{torch.max(value)}")
            elif isinstance(output, torch.Tensor):
                try:
                    print_rank_0(
                        # Fixed missing ", " separator between Shape and Norm.
                        f"output0, {name}, DataType:{output.dtype}, Shape:{output.shape}, "
                        f"Norm:{output.contiguous().norm().item()}, mean:{output.contiguous().mean()}, "
                        f"min:{torch.min(output)}, max:{torch.max(output)}")
                except Exception as e:
                    # Best-effort: stats on exotic tensor subclasses may fail;
                    # report the error instead of breaking the forward pass.
                    print_rank_0(e)

    return print_dtype_hooks


def backward_hook(name):
    """Build a backward hook that prints gradient stats for module *name*.

    For every tensor in grad_input and grad_output the hook prints dtype,
    shape, norm, mean, min and max; None gradients are reported as such.
    Output goes through plain print (every rank), gated on the
    module-level ``iteration`` counter being >= 0.
    """
    def print_dtype_hooks(module, grad_input, grad_output):
        global iteration
        if iteration < 0:
            return

        # Gradients w.r.t. the module's inputs.
        if isinstance(grad_input, torch.Tensor):
            print(
                f"grad_input0, {name}, DataType:{grad_input.dtype}, "
                f"Shape:{grad_input.shape}, Norm:{grad_input.norm().item()} "
                f"mean: {torch.mean(grad_input)} min: {torch.min(grad_input)} max: {torch.max(grad_input)}")
        elif isinstance(grad_input, tuple):
            for ind, grad in enumerate(grad_input):
                if grad is None:
                    print(f"grad_input, {name}, index={ind}, None")
                elif isinstance(grad, torch.Tensor):
                    print(
                        f"grad_input, {name}, index={ind}, DataType:{grad.dtype}, "
                        f"Shape:{grad.shape}, Norm:{grad.norm().item()} "
                        f"mean: {torch.mean(grad)} min: {torch.min(grad)} "
                        f"max: {torch.max(grad)}")

        # Gradients w.r.t. the module's outputs.
        if isinstance(grad_output, torch.Tensor):
            print(
                f"grad_output0, {name}, DataType:{grad_output.dtype}, Shape:{grad_output.shape}, "
                f"Norm:{grad_output.norm().item()} "
                f"mean: {torch.mean(grad_output)} min: {torch.min(grad_output)} max: {torch.max(grad_output)}")
        elif isinstance(grad_output, tuple):
            for ind, grad in enumerate(grad_output):
                if grad is None:
                    print(f"grad_output, {name}, index={ind}, None")
                elif isinstance(grad, torch.Tensor):
                    print(
                        f"grad_output, {name}, index={ind}, DataType:{grad.dtype}, "
                        f"Shape:{grad.shape}, Grad_Norm:{grad.norm().item()} "
                        f"mean: {torch.mean(grad)} min: {torch.min(grad)} "
                        f"max: {torch.max(grad)}")

    return print_dtype_hooks


def save_forward_hook(name, save_path):
    """Build a forward hook that dumps a module's inputs/outputs to disk.

    Each call writes ``{rank}_{name}_input.pt`` and
    ``{rank}_{name}_output.pt`` inside *save_path*. Note: files are
    overwritten on every forward pass, so only the last call survives.
    """
    def save_value_hook(module, inputs, outputs):
        # Fall back to rank 0 so the hook also works in single-process runs
        # (get_rank() raises when distributed is not initialized).
        rank = torch.distributed.get_rank() if torch.distributed.is_initialized() else 0
        # os.path.join instead of raw concatenation: the original produced
        # "dir0_name_input.pt" when save_path lacked a trailing separator.
        torch.save(inputs, os.path.join(save_path, f"{rank}_{name}_input.pt"))
        torch.save(outputs, os.path.join(save_path, f"{rank}_{name}_output.pt"))

    return save_value_hook


def register_hooks(model, save_tensor_path=None):
    if save_tensor_path is not None:
        os.makedirs(save_tensor_path, exist_ok=True)
    for name, module in model.named_modules():
        # 过滤掉不需要的模块
        if isinstance(module, (torch.nn.Module)) and 'cross_attn_bda' not in name:  # 根据需要调整条件
            print_forward_hook = forward_hook(name)
            print_backward_hook = backward_hook(name)
            module.register_forward_hook(print_forward_hook)
            module.register_backward_hook(print_backward_hook)
            if save_tensor_path is not None:
                module.register_forward_hook(save_forward_hook(name, save_tensor_path))