import torch
from tqdm import tqdm
import os
import numpy as np

from utils.model_utils import (
    get_device,
    initialize_model,
    get_optimizer,
    get_loss_function,
    save_model,
    load_checkpoint,
    setup_logging
)
from utils.metrics_utils import evaluate_metrics
from utils.plot_utils import plot_training_curves
from utils.model_utils import DiceLoss  # DiceLoss imported for Dice-score computation

from data.data_loader import get_num_classes, create_data_loaders

class Trainer:
    """Trainer class encapsulating the training and validation loop.

    Builds the model/optimizer/loss from ``config``, optionally resumes from
    a checkpoint, and tracks per-epoch loss and Dice history for plotting.
    """

    def __init__(self, config):
        """Initialize training state from a parsed configuration dict.

        Args:
            config: configuration mapping; reads ``config["training"]`` keys
                (``log_dir``, ``save_dir``, ``epochs``, ``checkpoint_interval``,
                ``plot_interval``, optional ``resume_checkpoint``).
        """
        self.config = config
        self.device = get_device()
        self.logger = setup_logging(config["training"]["log_dir"])

        # Number of segmentation classes, derived from the config.
        self.num_classes = get_num_classes(config)

        # Model, optimizer and training loss.
        self.model = initialize_model(config).to(self.device)
        self.optimizer = get_optimizer(self.model, config)
        self.criterion = get_loss_function(config)
        # Kept for backward compatibility with code that may reference this
        # attribute; Dice scores are actually computed via evaluate_metrics,
        # and this criterion never participates in backpropagation.
        self.dice_criterion = DiceLoss()

        # Per-epoch training history (used by plot_training_curves).
        self.train_losses = []
        self.val_losses = []
        self.train_dice_scores = []
        self.val_dice_scores = []
        self.best_val_loss = float("inf")
        self.start_epoch = 0

        # Resume from the latest checkpoint if requested.
        if config["training"].get("resume_checkpoint", False):
            checkpoint_path = os.path.join(config["training"]["save_dir"], "latest_checkpoint.pth")
            self.start_epoch, self.best_val_loss = load_checkpoint(self.model, self.optimizer, checkpoint_path)
            # TODO: also restore the loss/Dice history so curves continue seamlessly.

    def _mean_dice(self, pred_masks, true_masks):
        """Return the mean Dice score over collected prediction/target masks.

        Args:
            pred_masks: list of per-sample predicted class-index masks (CPU tensors).
            true_masks: list of per-sample ground-truth masks (CPU tensors).
        """
        metrics = evaluate_metrics(pred_masks, true_masks, self.num_classes)
        return np.mean(metrics["dice"])

    def _train_one_epoch(self, train_loader):
        """Run one training epoch.

        Returns:
            (avg_loss, avg_dice) for the epoch.
        """
        self.model.train()
        running_loss = 0.0
        all_pred_masks = []
        all_true_masks = []

        for images, masks in tqdm(train_loader, desc="Training", leave=False):
            images = images.to(self.device)
            masks = masks.to(self.device)

            self.optimizer.zero_grad()
            outputs = self.model(images)
            loss = self.criterion(outputs, masks)
            loss.backward()
            self.optimizer.step()

            running_loss += loss.item()

            # Collect per-sample predictions for the epoch-level Dice score.
            # Keep them as detached CPU tensors directly instead of the old
            # wasteful tensor -> numpy -> tensor round trip.
            pred_masks = torch.argmax(outputs, dim=1)
            all_pred_masks.extend(pred_masks.detach().cpu())
            all_true_masks.extend(masks.detach().cpu())

        # Guard against an empty loader so we return 0.0 instead of raising
        # ZeroDivisionError.
        avg_loss = running_loss / max(len(train_loader), 1)
        return avg_loss, self._mean_dice(all_pred_masks, all_true_masks)

    def _validate_one_epoch(self, val_loader):
        """Run one validation epoch (no gradient updates).

        Returns:
            (avg_loss, avg_dice) for the epoch.
        """
        self.model.eval()
        running_loss = 0.0
        all_pred_masks = []
        all_true_masks = []

        with torch.no_grad():
            for images, masks in tqdm(val_loader, desc="Validation", leave=False):
                images = images.to(self.device)
                masks = masks.to(self.device)

                outputs = self.model(images)
                loss = self.criterion(outputs, masks)
                running_loss += loss.item()

                # Per-sample predicted class maps, kept as CPU tensors.
                pred_masks = torch.argmax(outputs, dim=1)
                all_pred_masks.extend(pred_masks.cpu())
                all_true_masks.extend(masks.cpu())

        avg_loss = running_loss / max(len(val_loader), 1)
        return avg_loss, self._mean_dice(all_pred_masks, all_true_masks)

    def train(self):
        """Run the full training loop and return the trained model."""
        self.logger.info("开始训练...")

        total_epochs = self.config["training"]["epochs"]
        save_dir = self.config["training"]["save_dir"]

        for epoch in range(self.start_epoch, total_epochs):
            # NOTE: the original line nested double quotes inside a
            # double-quoted f-string, which is a SyntaxError before
            # Python 3.12 (PEP 701); fixed by hoisting the value.
            self.logger.info(f"Epoch {epoch+1}/{total_epochs}")

            # Recreate loaders each epoch (augmentation may depend on epoch).
            train_loader, val_loader, _ = create_data_loaders(self.config, current_epoch=epoch)

            # Train.
            train_loss, train_dice = self._train_one_epoch(train_loader)
            self.train_losses.append(train_loss)
            self.train_dice_scores.append(train_dice)

            # Validate.
            val_loss, val_dice = self._validate_one_epoch(val_loader)
            self.val_losses.append(val_loss)
            self.val_dice_scores.append(val_dice)

            self.logger.info(f"Train Loss: {train_loss:.4f}, Train Dice: {train_dice:.4f}, Val Loss: {val_loss:.4f}, Val Dice: {val_dice:.4f}")

            # Save the best model (lowest validation loss so far).
            if val_loss < self.best_val_loss:
                self.best_val_loss = val_loss
                save_model(
                    self.model,
                    self.optimizer,
                    epoch,
                    val_loss,
                    os.path.join(save_dir, "best_model.pth"),
                    is_best=True
                )

            # Always save the latest model.
            save_model(
                self.model,
                self.optimizer,
                epoch,
                val_loss,
                os.path.join(save_dir, "latest_model.pth")
            )

            # Periodic checkpoints (also refresh the resume checkpoint).
            if (epoch + 1) % self.config["training"]["checkpoint_interval"] == 0:
                save_model(
                    self.model,
                    self.optimizer,
                    epoch,
                    val_loss,
                    os.path.join(save_dir, f"checkpoint_epoch_{epoch+1}.pth")
                )
                save_model(
                    self.model,
                    self.optimizer,
                    epoch,
                    val_loss,
                    os.path.join(save_dir, "latest_checkpoint.pth")  # used to resume training
                )

            # Plot training curves at the configured interval and on the last epoch.
            if (epoch + 1) % self.config["training"]["plot_interval"] == 0 or (epoch + 1) == total_epochs:
                plot_training_curves(
                    self.train_losses,
                    self.val_losses,
                    self.train_dice_scores,
                    self.val_dice_scores,
                    os.path.join(self.config["training"]["log_dir"], "training_curves.png")
                )

        self.logger.info("训练完成！")
        return self.model


if __name__ == "__main__":
    # Smoke test: build a tiny fake dataset + config, run two epochs, clean up.
    import shutil
    import yaml

    # Fake config.yaml content mirroring the real schema.
    fake_config_content = """
    data:
      image_dir: "./fake_data/images"
      label_dir: "./fake_data/labels"
      train_list: "./fake_data/train.txt"
      val_list: "./fake_data/val.txt"
      test_list: "./fake_data/test.txt"
      image_size: [128, 128]
      name_classes: ["Background", "Class1"]
    augmentations:
      enabled: True
      augment_stop_epoch: 5
      flip: True
      rotate: 10
      scale: [0.8, 1.2]
    model:
      name: "unet"
      pretrained: False
      pretrained_path: ""
    training:
      epochs: 2
      batch_size: 2
      learning_rate: 0.001
      loss_function: "CrossEntropyDiceLoss"
      optimizer: "Adam"
      log_dir: "./logs_test"
      save_dir: "./saved_models_test"
      checkpoint_interval: 1
      plot_interval: 1
      resume_checkpoint: False
    evaluation:
      metric_save_path: "./results/metrics/metrics.json"
      plot_confusion_matrix: True
    inference:
      mode: "batch"
      input_path: "./fake_data/inference_images"
      output_dir: "./pred_results"
      model_path: "./saved_models/best_model.pth"
      visualize: True
      colors:
        - [0, 0, 0]
        - [255, 0, 0]
    """

    config = yaml.safe_load(fake_config_content)

    # Create fake data directories and output directories.
    os.makedirs("./fake_data/images", exist_ok=True)
    os.makedirs("./fake_data/labels", exist_ok=True)
    os.makedirs("./logs_test", exist_ok=True)
    os.makedirs("./saved_models_test", exist_ok=True)

    # Create fake image and label files.
    from PIL import Image
    for i in range(4):
        img = Image.new("RGB", (128, 128), color=(i * 50, i * 100, i * 150))
        mask = Image.new("L", (128, 128), color=i % 2)
        img.save(f"./fake_data/images/image_{i}.png")
        mask.save(f"./fake_data/labels/image_{i}.png")

    # Split lists contain bare stems (file names without extension).
    with open("./fake_data/train.txt", "w") as f:
        f.write("image_0\nimage_1\n")
    with open("./fake_data/val.txt", "w") as f:
        f.write("image_2\n")
    with open("./fake_data/test.txt", "w") as f:
        f.write("image_3\n")

    try:
        print("开始测试Trainer...")
        trainer = Trainer(config)
        trainer.train()
        print("Trainer测试完成！")
    finally:
        # Always remove the fixtures, even when the smoke test fails
        # (the original only cleaned up on success, leaking test dirs).
        # ignore_errors avoids masking a training failure with a cleanup error.
        shutil.rmtree("./fake_data", ignore_errors=True)
        shutil.rmtree("./logs_test", ignore_errors=True)
        shutil.rmtree("./saved_models_test", ignore_errors=True)
