import argparse
import os

import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
from torch.utils.data import DataLoader, Subset
from torchvision.transforms import ToTensor
from torchmetrics.image import StructuralSimilarityIndexMeasure
from tqdm import tqdm

from models.encoder_decoder_conv_lstm import EncoderDecoderConvLSTM
from utils.data_load import SequenceDataset
from utils.device_utils import get_device
from utils.logging_utils import get_logger
from utils.random_seed import random_seed
from utils.save_model.save_model_path import get_save_encoder_decoder_conv_lstm_model_checkpoint_path, \
    get_save_encoder_decoder_conv_lstm_model_path
from utils.save_model.save_model_utils import save_model_handle, save_checkpoint_handle, load_checkpoint_handle, \
    handle_save_int
from utils.summary_writer_utils import get_summary_writer, get_encoder_decoder_conv_lstm_summary_writer_path
from utils.train.train_utils import loss_batch_handle, model_parameters_isnan_or_inf, total_loss_isnan_or_inf, lr_batch_handle

# import sys
# sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../../'))) #添加py搜索路径 方法一


# # 获取当前文件的绝对路径 #添加py搜索路径 方法二
# current_dir = os.path.dirname(os.path.abspath(__file__))
# # 向上回退两级目录到项目根目录
# project_root = os.path.dirname(os.path.dirname(current_dir))
# # 将项目根目录添加到Python路径
# sys.path.insert(0, project_root)

# Module-level logger shared by this script.
logger = get_logger()




# Seed all RNGs for reproducibility (fixed seed value 6).
random_seed(6)

# Select the target compute device (GPU index 0; fallback behavior is
# determined by get_device).
device = get_device(target_gpu=0)

# Path where training checkpoints are written to / resumed from.
checkpoint_path = get_save_encoder_decoder_conv_lstm_model_checkpoint_path()



# 3. Training loop
def train(model, train_dataloader, test_dataloader, criterion, optimizer, scheduler, num_epochs=50, writer=None, save_epoch=None, save_batch_idx=None):
    """Run the full training loop with batch-level logging and per-epoch checkpointing.

    Args:
        model: network to optimize (already moved to the target device).
        train_dataloader: yields (inputs, targets) training batches.
        test_dataloader: held-out batches evaluated once per epoch via test().
        criterion: callable(pred, target, rank=device) -> scalar loss tensor.
        optimizer: torch optimizer.
        scheduler: per-batch LR scheduler (OneCycleLR in this script).
        num_epochs: total number of epochs to run.
        writer: optional TensorBoard SummaryWriter (handled by *_batch_handle helpers).
        save_epoch: epoch to resume from (None -> start at 0, via handle_save_int).
        save_batch_idx: batch index restored from a checkpoint (None -> 0).
    """
    dataloader_len = len(train_dataloader)
    for epoch in range(handle_save_int(save_epoch), num_epochs):
        model.train()
        running_loss = 0.0        # sum of batch losses since the last batch-level log
        total_epoch_loss = 0.0    # sum of ALL batch losses in this epoch
        # Robustness: keep batch_idx defined even if the dataloader is empty,
        # so save_checkpoint_handle below never hits a NameError.
        batch_idx = handle_save_int(save_batch_idx)
        # NOTE(review): enumerate(start=...) only shifts the reported index; it
        # does NOT skip already-trained batches after a resume -- confirm intended.
        for batch_idx, (inputs, targets) in tqdm(
                enumerate(train_dataloader, start=handle_save_int(save_batch_idx)),
                total=len(train_dataloader),   # total number of batches
                initial=handle_save_int(save_batch_idx),  # initial progress offset
                desc=f"Epoch {epoch+1}/{num_epochs}",
                leave=False  # avoid leaving stale progress bars behind
        ):
            # Move data to the training device.
            inputs = inputs.to(device)
            targets = targets.to(device)

            # Forward pass; output assumed (B, C, T, H, W) -- TODO confirm
            # against EncoderDecoderConvLSTM.
            outputs = model(inputs)
            b, c, t, h, w = outputs.shape

            # Fold the time axis into the batch axis so the criterion sees
            # 4-D tensors: (B, C, T, H, W) -> (B*T, C, H, W). SSIM in
            # loss_function requires 4-D input.
            outputs_flat = outputs.permute(0, 2, 1, 3, 4).reshape(b * t, c, h, w)
            targets_flat = targets.permute(0, 2, 1, 3, 4).reshape(b * t, c, h, w)

            total_loss = criterion(outputs_flat, targets_flat, rank=device)
            total_loss_isnan_or_inf(total_loss)  # abort early on NaN/Inf loss

            # Backward pass and optimization.
            optimizer.zero_grad()
            total_loss.backward()
            optimizer.step()
            scheduler.step()  # OneCycleLR is stepped once per batch

            model_parameters_isnan_or_inf(model)  # sanity-check weights for NaN/Inf

            # Loss bookkeeping.
            # BUGFIX: the epoch total previously accumulated `running_loss`
            # (a growing partial sum), inflating the per-epoch average; it now
            # accumulates the individual batch loss.
            batch_loss = total_loss.item()
            running_loss += batch_loss
            total_epoch_loss += batch_loss

            # Log every 10 batches.
            if batch_idx % 10 == 0:
                avg_loss = running_loss / 10
                global_step = epoch * dataloader_len + batch_idx
                loss_batch_handle('Loss/batch', writer, avg_loss, global_step)
                running_loss = 0.0

                # Log the current learning rate alongside the loss.
                current_lr = optimizer.param_groups[0]['lr']
                lr_batch_handle('Learning Rate/OneCycleLR', writer, current_lr, global_step)

        # Persist a resumable checkpoint at the end of each epoch.
        save_checkpoint_handle(model, optimizer, scheduler, epoch, batch_idx, running_loss, checkpoint_path)

        loss_batch_handle('Loss/Train/Epoch', writer, total_epoch_loss / dataloader_len, epoch)
        avg_test_loss = test(model, test_dataloader, criterion)
        loss_batch_handle('Loss/Test/Epoch', writer, avg_test_loss, epoch)



# Composite loss: 0.6 * SSIM-loss + 0.3 * MSE + 0.1 * L1.
def loss_function(pred, target, rank=0):
    """Weighted mix of structural (SSIM) and pixel-wise (MSE, L1) losses.

    A fresh SSIM metric is built per call because torchmetrics metrics are
    stateful; `rank` is the device the loss modules are moved to.
    """
    mse_term = nn.MSELoss().to(rank)(pred, target)
    mae_term = nn.L1Loss(reduction='mean').to(rank)(pred, target)
    ssim_metric = StructuralSimilarityIndexMeasure(data_range=1.0).to(rank)
    ssim_term = 1 - ssim_metric(pred, target)
    return 0.6 * ssim_term + 0.3 * mse_term + 0.1 * mae_term


# 4. Validation loop
def validate(model, dataloader, criterion):
    """Evaluate `model` on `dataloader` and return the mean batch loss."""
    model.eval()
    loss_sum = 0.0
    with torch.no_grad():
        for inputs, targets in dataloader:
            batch_in = inputs.to(device)
            batch_tgt = targets.to(device)

            preds = model(batch_in)
            loss_sum += criterion(preds, batch_tgt, rank=device).item()

    avg_loss = loss_sum / len(dataloader)
    print(f'Validation Loss: {avg_loss:.4f}')
    return avg_loss


# 5. Test loop (same structure as validation, but run on the held-out test set)
def test(model, dataloader, criterion):
    """Evaluate `model` on the test `dataloader` and return the mean batch loss.

    NOTE(review): unlike train(), this passes the raw 5-D model output straight
    to `criterion`; loss_function's SSIM path expects 4-D (N, C, H, W) input --
    verify this does not fail at runtime.
    """
    model.eval()
    total_loss = 0.0
    with torch.no_grad():
        for inputs, targets in tqdm(
                dataloader,
                desc="Test",  # BUGFIX: was the typo "Text" (needless f-string too)
                leave=False  # avoid leaving stale progress bars behind
        ):
            inputs = inputs.to(device)
            targets = targets.to(device)

            outputs = model(inputs)
            loss = criterion(outputs, targets, rank=device)
            total_loss += loss.item()

    avg_loss = total_loss / len(dataloader)
    print(f'Test Loss: {avg_loss:.4f}')
    return avg_loss


# def load_checkpoint(checkpoint_path=None):
#     if checkpoint_path is None:
#         return load_checkpoint_handle(checkpoint_path,device)
#     return None


def load_model(checkpoint_data=None):
    """Instantiate the EncoderDecoderConvLSTM (single-channel grayscale) on `device`.

    `checkpoint_data` is accepted for interface compatibility but is not used
    here; state restoration is handled separately via load_checkpoint_handle.
    """
    net = EncoderDecoderConvLSTM()
    return net.to(device)


def get_dataloader(csv_file, image_prefix_path, batch_size, num_workers):
    """Build a shuffled DataLoader over a SequenceDataset of radar image sequences.

    Args:
        csv_file: path to the CSV listing the sequences.
        image_prefix_path: directory prefix prepended to image paths.
        batch_size: batches per step.
        num_workers: DataLoader worker processes.

    Returns:
        A torch DataLoader over the dataset.

    Raises:
        ValueError: if csv_file or image_prefix_path is None.
    """
    if csv_file is None or image_prefix_path is None:
        # BUGFIX: was `raise None`, which itself fails with
        # "TypeError: exceptions must derive from BaseException".
        raise ValueError("csv_file and image_prefix_path are both required")

    # Optional per-image transform pipeline.
    transform = torchvision.transforms.Compose([
        torchvision.transforms.ToTensor(),
    ])

    # Build the Dataset and DataLoader.
    dataset = SequenceDataset(
        csv_file=csv_file,
        image_prefix_path=image_prefix_path,
        transform=transform,
        sequence_length=40,
        split_ratio=0.5,  # first 20 frames are inputs, last 20 are targets
    )

    dataloader = DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=True,
        num_workers=num_workers,
        prefetch_factor=10,
        persistent_workers=True,
        pin_memory=True,  # recommended when training on GPU
    )
    return dataloader


def train_encoder_decoder_conv_lstm_main():
    """CLI entry point: parse args, build data/model/optimizer, train, save weights."""
    parser = argparse.ArgumentParser(description="命令行工具")

    # Data paths.
    parser.add_argument("--csv_train_file", help="train csv_file文件路径", default=r'/DATA/disk1/hu/weather/Train/Train.csv')
    parser.add_argument("--image_train_prefix_path", help="train image 图片路径前缀",
                        default=r'/DATA/disk1/hu/weather/Train/Train/Radar')
    parser.add_argument("--csv_test_file", help="test csv_file文件路径", default=r'/DATA/disk1/hu/weather/test/TestA.csv')
    parser.add_argument("--image_test_prefix_path", help="test image 图片路径前缀",
                        default=r'/DATA/disk1/hu/weather/test/TestA/TestA/Radar')
    # BUGFIX: numeric options need type=int; without it, values supplied on the
    # command line arrive as strings (e.g. "120" * len(dl) repeats the string,
    # and DataLoader rejects a str batch_size).
    parser.add_argument("--batch_size", type=int, help="batch_size", default=5)
    parser.add_argument("--num_workers", type=int, help="num_workers", default=5)
    parser.add_argument("--num_epochs", type=int, help="num_epochs", default=120)
    args = parser.parse_args()

    writer = get_summary_writer(get_encoder_decoder_conv_lstm_summary_writer_path())

    model = load_model()

    train_dataloader = get_dataloader(args.csv_train_file, args.image_train_prefix_path, args.batch_size, args.num_workers)
    test_dataloader = get_dataloader(args.csv_test_file, args.image_test_prefix_path, args.batch_size, args.num_workers)

    # Adam with L2 regularization via weight_decay.
    optimizer = optim.Adam(model.parameters(), lr=3e-4, weight_decay=1e-4)

    # One-cycle LR schedule, stepped once per batch inside train().
    scheduler = torch.optim.lr_scheduler.OneCycleLR(
        optimizer,
        max_lr=3e-3,  # peak learning rate
        total_steps=args.num_epochs * len(train_dataloader),  # total optimizer steps
        pct_start=0.3  # warm-up fraction of the cycle
    )

    # Restore model/optimizer/scheduler state from a checkpoint if one exists.
    checkpoint_data, save_epoch, save_batch_idx, save_loss = load_checkpoint_handle(model, optimizer, scheduler,
                                                                                    checkpoint_path, device)

    # Train the model.
    train(model, train_dataloader, test_dataloader, loss_function, optimizer, scheduler, num_epochs=args.num_epochs, writer=writer, save_epoch=save_epoch,
          save_batch_idx=save_batch_idx)

    # Persist the final weights.
    torch.save(model.state_dict(), get_save_encoder_decoder_conv_lstm_model_path())

    # 加载模型（示例）
    # model.load_state_dict(torch.load("trained_model.pth"))

# 6. Main entry point
if __name__ == "__main__":
    train_encoder_decoder_conv_lstm_main()