import argparse
import os

import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
from torch.utils.data import DataLoader
from torchvision.transforms import ToTensor
from torchmetrics.image import StructuralSimilarityIndexMeasure

import torch.distributed as dist

import torch.multiprocessing as mp




from models.model import MyModel
from utils.data_load import SequenceDataset
from utils.logging_utils import get_logger
from utils.save_model.save_model_path import get_save_distributed_data_parallel_model_path

# Module-level logger shared by all workers in this file.
logger = get_logger()

os.environ["CUDA_VISIBLE_DEVICES"] = "3,5,6" # restrict this job to these physical GPUs; ranks index into this list


def setup(rank, world_size):
    """Initialize this process's NCCL process group for DDP.

    Args:
        rank: this process's rank (also used as its CUDA device index).
        world_size: total number of processes participating in training.
    """
    # MASTER_ADDR/MASTER_PORT are redundant when init_method gives an explicit
    # tcp:// rendezvous; per the original author's note, setting them as well
    # prevented single-node multi-GPU startup.
    # os.environ['MASTER_ADDR'] = 'localhost'
    # os.environ['MASTER_PORT'] = '16655'
    dist.init_process_group("nccl",init_method='tcp://127.0.0.1:12655', rank=rank, world_size=world_size)

    # Alternative rendezvous kept for reference: file-based init with gloo.
    # torch.distributed.init_process_group(backend='gloo', init_method='file:///home/user/switch.txt', world_size=world_size,
    #                                      rank=rank)

def cleanup():
    """Destroy this process's distributed process group (pairs with setup)."""
    dist.destroy_process_group()


def main_worker(rank, world_size,args):
    """Per-GPU worker entry point spawned by ``mp.spawn``.

    Builds the model on this rank's device, optionally resumes from a saved
    checkpoint, wraps it in DistributedDataParallel, constructs the sharded
    dataloader, and runs the training loop.

    Args:
        rank: process rank, used directly as the CUDA device index.
        world_size: total number of spawned processes.
        args: parsed CLI namespace (csv_file, image_prefix_path,
            batch_size, num_workers).
    """
    setup(rank, world_size)
    print(f"main_worker ----------rank:{rank}--------------")

    # Create the model on this rank's GPU; resume weights if a checkpoint exists.
    model = MyModel().to(rank)
    save_model_path = get_save_distributed_data_parallel_model_path()
    if save_model_path is not None and os.path.exists(save_model_path):
        # map_location must be a device/str, not a bare int rank; this also
        # keeps rank 0's saved tensors from being materialized on one GPU.
        model.load_state_dict(
            torch.load(save_model_path, map_location=f"cuda:{rank}", weights_only=True)
        )
    ddp_model = nn.parallel.DistributedDataParallel(model, device_ids=[rank])
    print(f"main_worker load_state_dict ----------rank:{rank}--------------")

    # Per-frame transform applied inside the dataset.
    transform = torchvision.transforms.Compose([
        torchvision.transforms.ToTensor(),
    ])

    dataset = SequenceDataset(
        csv_file=args.csv_file,
        image_prefix_path=args.image_prefix_path,
        transform=transform,
        sequence_length=40,
        split_ratio=0.5,  # first 20 frames are the input, last 20 the target
    )
    print(f"main_worker dataset ----------rank:{rank}--------------")

    # Shard the dataset across ranks; the sampler owns shuffling, so the
    # DataLoader must keep shuffle=False.
    sampler = torch.utils.data.distributed.DistributedSampler(
        dataset, num_replicas=world_size, rank=rank
    )
    print(f"main_worker sampler ----------rank:{rank}--------------")
    dataloader = DataLoader(
        dataset,
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=args.num_workers,
        prefetch_factor=10,
        persistent_workers=True,
        pin_memory=True,  # recommended when copying batches to GPU
        sampler=sampler
    )
    print(f"main_worker dataloader ----------rank:{rank}--------------")

    # Loss, optimizer (weight_decay = L2 regularization), LR schedule.
    criterion = nn.MSELoss()
    optimizer = optim.Adam(ddp_model.parameters(), lr=1e-4, weight_decay=1e-5)
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)

    model.train()

    try:
        train(model, rank, ddp_model, dataloader, criterion, optimizer, scheduler, num_epochs=5000)
        # Only rank 0 writes the final checkpoint: all ranks hold identical
        # weights after DDP training, and concurrent writes to the same file
        # from every process would race/corrupt it.
        if rank == 0:
            torch.save(model.state_dict(), get_save_distributed_data_parallel_model_path())
    except Exception:
        # logger.exception keeps the traceback, unlike the original print(e).
        logger.exception(f"training failed on rank {rank}")
    finally:
        # Always tear down the process group, even if saving failed.
        cleanup()









# 3. 训练循环
# 3. Training loop
def train(model, rank, ddp_model, dataloader, criterion, optimizer, scheduler, num_epochs=50):
    """Run the DDP training loop.

    Per-batch loss: 0.5*MSE + 0.9*(1 - SSIM) + L1, with SSIM/L1 computed on
    frames flattened to (B*T, C, H, W).

    Args:
        model: the underlying (unwrapped) module, used for checkpointing,
            gradient clipping and parameter sanity checks.
        rank: CUDA device index for this process.
        ddp_model: DistributedDataParallel wrapper used for forward passes.
        dataloader: sharded DataLoader (expected to use DistributedSampler).
        criterion: MSE loss module.
        optimizer / scheduler: optimizer and per-epoch LR scheduler.
        num_epochs: number of epochs to train.

    Raises:
        RuntimeError: if the loss or any parameter becomes NaN/Inf.
    """
    l1_mean = nn.L1Loss(reduction='mean').to(rank)
    # Build the SSIM metric once; the original constructed a new module on the
    # GPU every single batch.
    ssim = StructuralSimilarityIndexMeasure(data_range=1.0).to(rank)

    for epoch in range(num_epochs):
        # DistributedSampler needs the epoch number to derive a new shuffle;
        # without set_epoch every epoch sees the identical ordering.
        sampler = getattr(dataloader, "sampler", None)
        if sampler is not None and hasattr(sampler, "set_epoch"):
            sampler.set_epoch(epoch)

        running_loss = 0.0
        batches_accumulated = 0
        for batch_idx, (inputs, targets) in enumerate(dataloader):
            # Move the batch to this rank's device.
            inputs = inputs.to(rank)
            targets = targets.to(rank)

            # Forward pass through the DDP wrapper (triggers gradient sync).
            outputs = ddp_model(inputs)
            mse_loss = criterion(outputs, targets)

            # Fold batch and time axes: (B, C, T, H, W) -> (B*T, C, H, W) so
            # SSIM/L1 treat each frame as an independent image.
            b, c, t, h, w = outputs.shape
            outputs_1 = outputs.permute(0, 2, 1, 3, 4).reshape(b * t, c, h, w)
            targets_1 = targets.permute(0, 2, 1, 3, 4).reshape(b * t, c, h, w)

            l1_mean_loss = l1_mean(outputs_1, targets_1)
            ssim_loss = 1 - ssim(outputs_1, targets_1)
            total_loss = 0.5 * mse_loss + 0.9 * ssim_loss + l1_mean_loss

            if torch.isnan(total_loss).any() or torch.isinf(total_loss).any():
                # Raise instead of exit() so main_worker can log and still run
                # cleanup() on the process group.
                raise RuntimeError("loss became NaN/Inf")

            # Backward pass and optimization step with gradient clipping.
            optimizer.zero_grad()
            total_loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
            optimizer.step()

            # Sanity check: abort if any parameter went NaN/Inf after the step.
            for name, param in model.named_parameters():
                if torch.isnan(param).any() or torch.isinf(param).any():
                    raise RuntimeError(f"parameter {name} contains NaN/Inf")

            running_loss += total_loss.item()
            batches_accumulated += 1

            # Log every 10 batches, averaging over the batches actually
            # accumulated (the original divided by a hard-coded 10, which was
            # wrong for the very first log at batch_idx == 0).
            if batch_idx % 10 == 0:
                avg_loss = running_loss / batches_accumulated
                logger.info(
                    f'Epoch [{epoch + 1}/{num_epochs}], Batch [{batch_idx + 1}/{len(dataloader)}], Loss: {avg_loss:.4f}')
                running_loss = 0.0
                batches_accumulated = 0

        # Checkpoint once per epoch; rank 0 only, so the ranks don't race on
        # writing the same file (weights are identical across ranks after DDP).
        if rank == 0:
            torch.save(model.state_dict(), get_save_distributed_data_parallel_model_path())
        # Per-epoch learning-rate decay.
        scheduler.step()




# 6. 主程序
# 6. Entry point: parse CLI args and spawn one worker process per visible GPU.
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="命令行工具")

    # CLI arguments. batch_size/num_workers carry explicit type=int: without
    # it, values passed on the command line arrive as strings (only the
    # defaults were ints), which breaks DataLoader.
    parser.add_argument("--csv_file", help="csv_file文件路径", default=r'/DATA/disk1/hu/weather/Train/Train.csv')
    parser.add_argument("--image_prefix_path", help="image 图片路径前缀", default=r'/DATA/disk1/hu/weather/Train/Train/Radar')
    parser.add_argument("--batch_size", type=int, help="batch_size", default=60)
    parser.add_argument("--num_workers", type=int, help="num_workers", default=5)
    args = parser.parse_args()

    # One process per GPU visible under CUDA_VISIBLE_DEVICES; mp.spawn passes
    # the rank as the first argument to main_worker.
    world_size = torch.cuda.device_count()
    mp.spawn(main_worker, args=(world_size, args), nprocs=world_size)


    # 加载模型（示例）
    # model.load_state_dict(torch.load("trained_model.pth"))