import argparse
import os

import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
from torch.utils.data import DataLoader
from torchvision.transforms import ToTensor
from torchmetrics.image import StructuralSimilarityIndexMeasure




from models.model import MyModel
from utils.data_load import SequenceDataset
from utils.device_utils import get_device
from utils.logging_utils import get_logger
from utils.save_model.save_model_path import get_save_model_path

logger = get_logger()




# Model initialisation.

# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Pin all work to one GPU.  NOTE(review): the original comment said
# "the 1st GPU" but target_gpu=4 selects index 4 — confirm which is intended.
device = get_device(target_gpu=4)

model = MyModel().to(device)  # greyscale, single-channel input — TODO confirm against MyModel

# Resume from an existing checkpoint if one is present on disk.
save_model_path = get_save_model_path()
if save_model_path is not None:
    if os.path.exists(save_model_path):
        model.load_state_dict(torch.load(save_model_path, map_location=device,weights_only=True))


# 2. Loss function and optimiser.
criterion = nn.MSELoss()  # standard regression loss
optimizer = optim.Adam(model.parameters(), lr=1e-4)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=50, gamma=0.1)  # learning-rate decay


# 3. Training loop
def train(model, dataloader, criterion, optimizer, scheduler, num_epochs=50):
    """Run the training loop over ``dataloader`` for ``num_epochs`` epochs.

    The loss is a weighted combination of MSE, (1 - SSIM) and mean L1,
    with SSIM/L1 computed on frame-flattened tensors.  The model state
    dict is checkpointed after every epoch and the LR scheduler is
    stepped once per epoch.

    NOTE(review): relies on the module-level ``device``, ``logger`` and
    ``get_save_model_path`` globals.
    """
    model.train()
    l1_mean = nn.L1Loss(reduction='mean').to(device)
    # Build the SSIM metric once.  The original re-instantiated it (and moved
    # it to the device) on every single batch, which was pure per-step
    # overhead; forward() still returns the per-batch SSIM either way.
    ssim = StructuralSimilarityIndexMeasure(data_range=1.0).to(device)
    for epoch in range(num_epochs):
        running_loss = 0.0
        batches_accumulated = 0  # batches folded into running_loss since the last log
        for batch_idx, (inputs, targets) in enumerate(dataloader):
            # Move the batch to the training device.
            inputs = inputs.to(device)
            targets = targets.to(device)

            # Forward pass.
            outputs = model(inputs)
            mse_loss = criterion(outputs, targets)

            # Fold the temporal axis into the batch axis so SSIM/L1 operate on
            # a stack of 2-D images.  Assumes outputs/targets are
            # (batch, channels, time, H, W) — TODO confirm against MyModel.
            b, c, t, h, w = outputs.shape
            outputs_1 = outputs.permute(0, 2, 1, 3, 4).reshape(b * t, c, h, w)
            targets_1 = targets.permute(0, 2, 1, 3, 4).reshape(b * t, c, h, w)

            ssim_loss = 1 - ssim(outputs_1, targets_1)
            l1_mean_loss = l1_mean(outputs_1, targets_1)
            total_loss = 0.7 * mse_loss + 0.9 * ssim_loss + l1_mean_loss

            # Abort immediately on a diverged loss rather than keep training
            # on garbage gradients.
            if torch.isnan(total_loss).any() or torch.isinf(total_loss).any():
                print("损失值为 NaN/Inf！")
                exit()

            # Backward pass and parameter update.
            optimizer.zero_grad()
            total_loss.backward()
            optimizer.step()

            # Abort if any parameter went non-finite after the update.
            for name, param in model.named_parameters():
                if torch.isnan(param).any() or torch.isinf(param).any():
                    print(f"参数 {name} 包含 NaN/Inf！")
                    exit()

            running_loss += total_loss.item()
            batches_accumulated += 1

            # Log every 10 batches.  Divide by the number of batches actually
            # accumulated: the original always divided by 10, which
            # misreported the first log of each epoch (only 1 batch had been
            # accumulated at batch_idx == 0) by a factor of 10.
            if batch_idx % 10 == 0:
                avg_loss = running_loss / batches_accumulated
                logger.info(
                    f'Epoch [{epoch + 1}/{num_epochs}], Batch [{batch_idx + 1}/{len(dataloader)}], Loss: {avg_loss:.4f}')

                running_loss = 0.0
                batches_accumulated = 0

        # Checkpoint after every epoch, then decay the learning rate.
        torch.save(model.state_dict(), get_save_model_path())
        scheduler.step()


# 4. Validation loop
def validate(model, dataloader, criterion):
    """Evaluate ``model`` on ``dataloader`` and return the mean batch loss.

    Runs in eval mode with gradient tracking disabled.
    NOTE(review): relies on the module-level ``device`` global.
    """
    model.eval()
    total_loss = 0.0
    num_batches = 0
    with torch.no_grad():
        for inputs, targets in dataloader:
            inputs = inputs.to(device)
            targets = targets.to(device)

            outputs = model(inputs)
            loss = criterion(outputs, targets)
            total_loss += loss.item()
            num_batches += 1

    # Count batches as we go: dividing by len(dataloader) raised
    # ZeroDivisionError on an empty loader (and required __len__ at all).
    avg_loss = total_loss / num_batches if num_batches else 0.0
    print(f'Validation Loss: {avg_loss:.4f}')
    return avg_loss


# 5. Test loop (same as validation, but usually run on a held-out test set)
def test(model, dataloader, criterion):
    """Evaluate ``model`` on a test ``dataloader`` and return the mean batch loss.

    Runs in eval mode with gradient tracking disabled.
    NOTE(review): relies on the module-level ``device`` global.
    """
    model.eval()
    total_loss = 0.0
    num_batches = 0
    with torch.no_grad():
        for inputs, targets in dataloader:
            inputs = inputs.to(device)
            targets = targets.to(device)

            outputs = model(inputs)
            loss = criterion(outputs, targets)
            total_loss += loss.item()
            num_batches += 1

    # Count batches as we go: dividing by len(dataloader) raised
    # ZeroDivisionError on an empty loader (and required __len__ at all).
    avg_loss = total_loss / num_batches if num_batches else 0.0
    print(f'Test Loss: {avg_loss:.4f}')
    return avg_loss


# 6. Entry point
if __name__ == "__main__":
    # Validation/test dataloaders would be built the same way as below and
    # passed to validate()/test().

    parser = argparse.ArgumentParser(description="命令行工具")

    # Command-line arguments.
    parser.add_argument("--csv_file", help="csv_file文件路径", default=r'/DATA/disk1/hu/weather/Train/Train.csv')
    parser.add_argument("--image_prefix_path", help="image 图片路径前缀", default=r'/DATA/disk1/hu/weather/Train/Train/Radar')
    # type=int is required: without it a value supplied on the command line
    # stays a str, and DataLoader(batch_size='60', num_workers='5', ...)
    # fails at runtime.  Only the (already-int) defaults worked before.
    parser.add_argument("--batch_size", help="batch_size", type=int, default=60)
    parser.add_argument("--num_workers", help="num_workers", type=int, default=5)
    args = parser.parse_args()

    # Optional image transform applied by the dataset.
    transform = torchvision.transforms.Compose([
        torchvision.transforms.ToTensor(),
    ])

    # Dataset of 40-frame sequences: split_ratio=0.5 means the first 20
    # frames are the input and the last 20 are the target.
    dataset = SequenceDataset(
        csv_file=args.csv_file,
        image_prefix_path=args.image_prefix_path,
        transform=transform,
        sequence_length=40,
        split_ratio=0.5,
    )

    dataloader = DataLoader(
        dataset,
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=args.num_workers,
        prefetch_factor=10,
        persistent_workers=True,
        pin_memory=True,  # recommended when training on GPU
    )

    # Train the model (train() also checkpoints after every epoch).
    train(model, dataloader, criterion, optimizer, scheduler, num_epochs=500)

    # Final checkpoint — redundant with the per-epoch save inside train(),
    # kept as a belt-and-braces last save.
    torch.save(model.state_dict(), get_save_model_path())