import argparse
import logging
import os
from dataclasses import dataclass, field
from pathlib import Path
from typing import Union, Optional, List

import torch
from sklearn.metrics import accuracy_score
from torch import nn
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.utils.data import DataLoader
from torchvision import transforms

from yms_zsl.dataset_utils.CustomImageDataset import ImageClassificationDataset
from yms_zsl.models.HSAZLM import CNN, DRCAE
from yms_zsl.tools.tool import create_csv, calculate_metric, \
    append_metrics_to_csv, get_device
from yms_zsl.tools.tools import setup_save_dirs, setup_logger
from yms_zsl.tools.train_eval_utils import train_cls_one_epoch, eval_cls_one_epoch, train_cae_one_epoch

logger = logging.getLogger('yms')

def main(configs):
    """Run the two-stage training pipeline: CNN classifier, then DRCAE autoencoder.

    Both stages share the same train/val loaders, use Adam with
    ReduceLROnPlateau scheduling, checkpoint the best model by validation
    loss, and early-stop after ``configs.patience`` non-improving epochs
    once the learning rate has decayed below 0.1% of its initial value.

    Args:
        configs: TrainConfig-like object supplying ``device``, ``transform``,
            dataset paths and hyperparameters (``epochs``, ``batch_size``,
            ``cnn_lr``, ``decae_lr``, ``patience``, ``num_workers``).
    """
    device = configs.device
    transform = configs.transform
    save_dir = configs.save_dir
    cnn_results_file = os.path.join(save_dir, 'cnn_metrics.csv')
    # Fix: 'epoch' added to the CNN header — the row dict handed to
    # append_metrics_to_csv always carries an 'epoch' key (see metric.update
    # below), and the DECAE header already lists it; keep both CSVs consistent.
    cnn_metrics = ['epoch', 'train_loss', 'val_loss', 'accuracy', 'precision', 'recall', 'f1-score', 'lr']
    # Fix: renamed local from 'decea_results_file' (typo) to 'decae_results_file'.
    decae_results_file = os.path.join(save_dir, 'decae_metrics.csv')
    decae_metrics = ['epoch', 'train_loss', 'val_loss', 'lr']
    create_csv(cnn_metrics, cnn_results_file)
    create_csv(decae_metrics, decae_results_file)

    train_dataset = ImageClassificationDataset(configs.data_dir, transform=transform, train_class=configs.train_class)
    val_dataset = ImageClassificationDataset(configs.data_dir, transform=transform, mode='val',
                                             train_class=configs.train_class)
    train_loader = DataLoader(train_dataset, batch_size=configs.batch_size, shuffle=True,
                              num_workers=configs.num_workers)
    val_loader = DataLoader(val_dataset, batch_size=configs.batch_size, shuffle=False,
                            num_workers=configs.num_workers)
    classes = train_dataset.classes

    cnn_model = CNN(len(classes)).to(device)
    decae = DRCAE().to(device)
    cnn_optimizer = torch.optim.Adam(cnn_model.parameters(), lr=configs.cnn_lr)
    decae_optimizer = torch.optim.Adam(decae.parameters(), lr=configs.decae_lr)
    cnn_scheduler = ReduceLROnPlateau(cnn_optimizer, factor=0.1, min_lr=1e-8, patience=6, mode='min')
    decae_scheduler = ReduceLROnPlateau(decae_optimizer, factor=0.1, min_lr=1e-8, patience=6, mode='min')
    cnn_criterion = nn.CrossEntropyLoss()
    decae_criterion = nn.MSELoss()
    # Best-so-far validation losses (lower is better); initialized to a sentinel high value.
    cnn_best = 1e8
    decae_best = 1e8
    num_epochs = configs.epochs

    # Early-stopping state for the CNN stage.
    patience = configs.patience  # patience <= 0 disables early stopping (see the `elif` guard below)
    early_stop_counter = 0
    best_epoch = 0
    logger.info('CNN training...')
    for epoch in range(num_epochs):
        # NOTE(review): ReduceLROnPlateau.get_last_lr() exists only in recent
        # torch versions (2.2+) — confirm against the pinned torch version.
        training_lr = cnn_scheduler.get_last_lr()[0]
        train_loss, train_accuracy = train_cls_one_epoch(model=cnn_model, train_loader=train_loader, device=device,
                                                         optimizer=cnn_optimizer, criterion=cnn_criterion, epoch=epoch)
        logger.info(f'Epoch {epoch + 1}/{num_epochs}, Loss: {train_loss:.5f}, Train Accuracy: {train_accuracy:.4%},'
                    f'lr: {training_lr}')

        result = eval_cls_one_epoch(model=cnn_model, val_loader=val_loader,
                                    device=device, criterion=cnn_criterion, epoch=epoch)

        metric = calculate_metric(result['y_true'], result['y_pred'], classes)
        metric["accuracy"] = accuracy_score(y_true=result['y_true'], y_pred=result['y_pred'])
        logger.info(f'val epoch {epoch + 1}, val loss: {result["val_loss"]}, accuracy: {metric["accuracy"]:.2%}')
        metric.update({'epoch': epoch, 'train_loss': train_loss, 'val_loss': result['val_loss'], 'lr': training_lr})
        append_metrics_to_csv(metric, cnn_results_file)
        # Plateau scheduler steps on the monitored quantity (validation loss).
        cnn_scheduler.step(metric['val_loss'])

        current_score = metric['val_loss']
        if current_score < cnn_best:
            # New best model: checkpoint it and reset the early-stop counter.
            cnn_best = current_score
            best_epoch = epoch
            early_stop_counter = 0
            cnn_model.save(os.path.join(save_dir, 'checkpoints', 'cnn.pth'))
            logger.info(f'Best model saved at epoch {epoch + 1} with val_loss: {cnn_best:.6f}')
        elif patience > 0 and training_lr < (configs.cnn_lr * 0.001):
            # Only count towards early stopping once the LR has decayed below
            # 0.1% of the initial LR (i.e. the scheduler has plateaued repeatedly).
            early_stop_counter += 1
            logger.info(f'Early stopping counter: {early_stop_counter}/{patience}')

            if early_stop_counter >= patience:
                logger.info(f'Early stopping triggered after {epoch + 1} epochs')
                break

    logger.info(f'Best model was saved at epoch {best_epoch + 1} with val_loss: {cnn_best:.4f}')

    logger.info('DECAE training...')
    # Reset early-stopping state for the DECAE stage.
    patience = configs.patience
    early_stop_counter = 0
    best_epoch = 0
    for epoch in range(num_epochs):
        training_lr = decae_scheduler.get_last_lr()[0]
        train_loss, val_loss = train_cae_one_epoch(model=decae, train_loader=train_loader, val_loader=val_loader,
                                                   optimizer=decae_optimizer, criterion=decae_criterion, epoch=epoch,
                                                   device=device)
        decae_scheduler.step(val_loss)
        metric = {'epoch': epoch, 'train_loss': train_loss, 'val_loss': val_loss, 'lr': training_lr}
        logger.info(f'epoch:{epoch}, 训练指标为: {metric}')
        append_metrics_to_csv(metric, decae_results_file)

        current_score = val_loss
        if current_score < decae_best:
            # New best autoencoder: checkpoint and reset the early-stop counter.
            decae_best = current_score
            best_epoch = epoch
            early_stop_counter = 0
            decae.save(os.path.join(save_dir, 'checkpoints', 'encoder.pth'))
            logger.info(f'Best model saved at epoch {epoch + 1} with val_loss: {decae_best:.6f}')
        elif patience > 0 and training_lr < (configs.decae_lr * 0.001):
            # Same LR-decay gate as the CNN stage before counting non-improvements.
            early_stop_counter += 1
            logger.info(f'Early stopping counter: {early_stop_counter}/{patience}')

            if early_stop_counter >= patience:
                logger.info(f'Early stopping triggered after {epoch + 1} epochs')
                break

    logger.info(f'Best model was saved at epoch {best_epoch + 1} with val_loss: {decae_best:.4f}')



@dataclass
class TrainConfig:
    """Training configuration for the standalone CNN/DRCAE pipeline.

    Instances are passed to ``main()`` as ``configs``; every attribute read
    by ``main()`` (device, transform, paths, hyperparameters) lives here.
    """
    # Device and data transform (explicit type annotations; excluded from repr)
    device: torch.device = field(
        default_factory=get_device,  # default_factory defers the call so it does not run at module import time
        metadata={"desc": "训练使用的设备（CPU/GPU）"},
        repr=False

    )
    transform: transforms.Compose = field(
        default_factory=lambda: transforms.Compose([  # default_factory builds a fresh Compose per instance
            transforms.Resize((32, 32)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ]),
        metadata={"desc": "图像预处理变换管道"},
        repr=False
    )
    # -------------------------- Path parameters --------------------------
    data_dir: Union[str, Path] = r'D:\Code\2-ZSL\0-data\CWRU\dataset'  # accepts either str or Path (Union type)
    save_dir: Union[str, Path] = r'D:\Code\2-ZSL\1-output\特征解耦结果\exp-3'
    train_class: Union[str, Path] = r'D:\Code\2-ZSL\0-data\CWRU\dataset\seen_classes.txt'
    # -------------------------- Training hyperparameters --------------------------
    epochs: int = 1
    batch_size: int = 40
    cnn_lr: float = 1e-3
    decae_lr: float = 1e-3
    weight_decay: float = 1e-5  # NOTE(review): not read by main() in this file — presumably used elsewhere; verify
    patience: int = 10  # early-stopping patience; <= 0 disables early stopping in main()
    prefix: Optional[str] = 'exp'

    num_workers: int = 0  # DataLoader worker processes (0 = load in main process)
    ignore_factors: Optional[List[str]] = None  # NOTE(review): not read by main() in this file — verify usage


if __name__ == '__main__':
    # Stand-alone entry point: build the run configuration, prepare the
    # output directory and logger, then launch the training pipeline.
    opts = TrainConfig(
        data_dir=r'D:\Code\2-ZSL\0-data\data\data',
        save_dir=r'D:\Code\2-ZSL\1-output\论文实验结果\对比方法\CWRU\HSAZLM\H01',
        train_class=r'D:\Code\2-ZSL\0-data\data\data/seen_classes.txt',
        batch_size=256,
    )
    # setup_save_dirs may rewrite save_dir (e.g. appending an experiment suffix).
    opts.save_dir = setup_save_dirs(opts.save_dir, opts.prefix)
    # Rebind the module-level logger to the run-specific one.
    logger = setup_logger(opts.save_dir)
    banner = "=" * 60
    logger.info(banner)
    logger.info("【单独运行】分类器训练流程")
    logger.info(f"配置信息:{vars(opts)}")
    logger.info(banner)
    main(opts)
