import os
import sys
import time
import json
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from sklearn.metrics import confusion_matrix, classification_report
from tqdm import tqdm
from datetime import datetime

# Configure matplotlib so CJK (Chinese) text renders correctly in plots.
plt.rcParams['font.sans-serif'] = ['Arial Unicode MS', 'SimHei', 'Microsoft YaHei']  # CJK-capable fonts, in priority order
plt.rcParams['axes.unicode_minus'] = False  # render minus signs correctly when a CJK font is active
plt.rcParams['font.family'] = 'sans-serif'  # use the sans-serif family configured above

# Make the project root importable (this file lives one level below it).
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))

from models.image_classifier import ImageClassifier
from training.train_classifier import create_data_loaders

class ClassifierTrainingLogger:
    """Training logger for the classifier.

    Persists hyperparameters, dataset info and per-epoch metrics to a JSON
    progress file, saves confusion-matrix data/plots per epoch, and renders
    a final Markdown training report.
    """

    def __init__(self, log_dir, resume_session=None):
        """
        Initialize the logger.

        Args:
            log_dir (str): Directory where all log artifacts are written.
            resume_session (str, optional): Session id to resume; currently unused.
        """
        # Use the given directory directly; do not create a per-session subdir.
        self.log_dir = log_dir
        os.makedirs(self.log_dir, exist_ok=True)

        # JSON file holding hyperparameters, dataset info and per-epoch metrics.
        self.progress_file = os.path.join(self.log_dir, "training_progress.json")
        self.progress_data = {}

        # When a progress file already exists (resumed run), load it so the
        # existing epoch history is preserved.
        if os.path.exists(self.progress_file):
            try:
                # encoding is pinned: the file may contain non-ASCII text and
                # must not depend on the platform default encoding.
                with open(self.progress_file, 'r', encoding='utf-8') as f:
                    self.progress_data = json.load(f)
                print(f"加载现有训练记录: {self.progress_file}")
            except (OSError, ValueError) as e:
                # OSError: unreadable file; ValueError covers JSONDecodeError
                # and decoding errors. Start fresh rather than crash.
                print(f"无法加载现有训练记录: {e}")
                self.progress_data = {}

        # Final Markdown report path.
        self.report_file = os.path.join(self.log_dir, "training_report.md")

        # Directory for confusion-matrix .npy data and .png plots.
        self.cm_dir = os.path.join(self.log_dir, "confusion_matrices")
        os.makedirs(self.cm_dir, exist_ok=True)

    def log_hyperparameters(self, args):
        """Persist the hyperparameters and merge them into the progress data.

        Args:
            args: argparse.Namespace or any object exposing hyperparameters
                as attributes.
        """
        # Turn args into a plain dict.
        if hasattr(args, "__dict__"):
            hyperparams = args.__dict__
        else:
            hyperparams = {k: getattr(args, k) for k in dir(args)
                           if not k.startswith('_') and not callable(getattr(args, k))}

        # Full hyperparameter record on disk.
        hyperparams_file = os.path.join(self.log_dir, "hyperparameters.json")
        with open(hyperparams_file, 'w', encoding='utf-8') as f:
            # default=str is deliberate: values such as Path objects should
            # still be recorded rather than abort the run.
            json.dump(hyperparams, f, indent=2, ensure_ascii=False, default=str)

        # "epochs" is reserved in progress_data for the per-epoch metric list;
        # store the configured epoch count under "num_epochs" instead so a
        # resumed run does not lose its recorded history.
        merged = {("num_epochs" if k == "epochs" else k): v
                  for k, v in hyperparams.items()}
        self.progress_data.update(merged)
        self._save_progress()

    def log_dataset_info(self, train_size, val_size, image_size, data_dir):
        """Record dataset statistics into the progress data.

        Args:
            train_size (int): Number of training images.
            val_size (int): Number of validation images.
            image_size (int): Side length of the (square) input images.
            data_dir (str): Label describing the data source.
        """
        self.progress_data.update({
            "train_size": train_size,
            "val_size": val_size,
            "image_size": image_size,
            "data_dir": data_dir
        })
        self._save_progress()

    def log_epoch(self, epoch, train_loss, train_acc, val_loss, val_acc, learning_rate, class_names, conf_matrix=None):
        """
        Record the results of one training epoch.

        Args:
            epoch (int): 1-based epoch number.
            train_loss (float): Training loss.
            train_acc (float): Training accuracy.
            val_loss (float): Validation loss.
            val_acc (float): Validation accuracy.
            learning_rate (float): Learning rate used for this epoch.
            class_names (list): Class names for confusion-matrix axes.
            conf_matrix (numpy.ndarray, optional): Confusion matrix to save
                as .npy and plot as a heatmap.
        """
        epoch_data = {
            "epoch": epoch,
            "train_loss": float(train_loss),
            "train_acc": float(train_acc),
            "val_loss": float(val_loss),
            "val_acc": float(val_acc),
            "learning_rate": float(learning_rate)
        }

        # "epochs" must be a list of per-epoch records; repair it if an old
        # progress file stored something else under that key.
        if not isinstance(self.progress_data.get("epochs"), list):
            if "epochs" in self.progress_data:
                print(f"警告: epochs键不是列表类型 ({type(self.progress_data['epochs']).__name__})，重置为空列表")
            self.progress_data["epochs"] = []

        # Replace an existing record for this epoch (resume case), else append.
        for i, existing in enumerate(self.progress_data["epochs"]):
            if existing["epoch"] == epoch:
                self.progress_data["epochs"][i] = epoch_data
                break
        else:
            self.progress_data["epochs"].append(epoch_data)

        self._save_progress()

        if conf_matrix is not None:
            # Raw matrix for later analysis.
            cm_file = os.path.join(self.cm_dir, f"confusion_matrix_epoch_{epoch}.npy")
            np.save(cm_file, conf_matrix)

            # Heatmap visualization.
            plt.figure(figsize=(8, 6))
            sns.heatmap(conf_matrix, annot=True, fmt='d', cmap='Blues',
                        xticklabels=class_names, yticklabels=class_names)
            plt.xlabel('Predicted')
            plt.ylabel('True')
            plt.title(f'Confusion Matrix - Epoch {epoch}')
            plt.tight_layout()

            cm_img_file = os.path.join(self.cm_dir, f"confusion_matrix_epoch_{epoch}.png")
            plt.savefig(cm_img_file)
            plt.close()

    def generate_report(self, total_time, final_train_acc, final_val_acc, class_report=None):
        """
        Write the final Markdown training report.

        Args:
            total_time (float): Total training time in seconds.
            final_train_acc (float): Final training accuracy (0..1).
            final_val_acc (float): Final validation accuracy (0..1).
            class_report (dict or str, optional): Result of
                sklearn.metrics.classification_report.
        """
        hours, remainder = divmod(total_time, 3600)
        minutes, seconds = divmod(remainder, 60)

        report = [
            "# 人脸-花卉分类器训练报告\n",
            "## 训练摘要\n",
            f"- 训练时间: {int(hours)}小时 {int(minutes)}分钟 {seconds:.2f}秒\n",
            f"- 最终训练准确率: {final_train_acc:.2%}\n",
            f"- 最终验证准确率: {final_val_acc:.2%}\n\n"
        ]

        # Hyperparameters section.
        report.append("## 超参数\n")
        if "batch_size" in self.progress_data:
            report.append(f"- batch_size: {self.progress_data['batch_size']}\n")
        if "epochs" in self.progress_data and isinstance(self.progress_data["epochs"], list):
            report.append(f"- epochs: {len(self.progress_data['epochs'])}\n")
        if "lr" in self.progress_data:
            report.append(f"- learning_rate: {self.progress_data['lr']}\n")
        report.append("\n")

        # Dataset section.
        report.append("## 数据集信息\n")
        if "train_size" in self.progress_data:
            report.append(f"- 训练集大小: {self.progress_data['train_size']}图像\n")
        if "val_size" in self.progress_data:
            report.append(f"- 验证集大小: {self.progress_data['val_size']}图像\n")
        if "image_size" in self.progress_data:
            report.append(f"- 图像大小: {self.progress_data['image_size']}x{self.progress_data['image_size']}\n")
        report.append("\n")

        # Per-epoch metrics table.
        report.append("## 训练进度\n")
        report.append("| 轮次 | 训练损失 | 训练准确率 | 验证损失 | 验证准确率 | 学习率 |\n")
        report.append("|------|----------|------------|----------|------------|--------|\n")

        if "epochs" in self.progress_data and isinstance(self.progress_data["epochs"], list):
            for epoch_data in self.progress_data["epochs"]:
                report.append(f"| {epoch_data['epoch']} | {epoch_data['train_loss']:.4f} | {epoch_data['train_acc']:.2%} | {epoch_data['val_loss']:.4f} | {epoch_data['val_acc']:.2%} | {epoch_data['learning_rate']:.6f} |\n")

        report.append("\n")

        # Classification report section.
        if class_report:
            report.append("## 分类报告\n")
            report.append("```\n")
            if isinstance(class_report, dict):
                # Format the dict report; default=0 guards the empty case.
                max_len = max((len(k) for k in class_report.keys() if isinstance(k, str)), default=0)
                for k, v in class_report.items():
                    if isinstance(k, str) and isinstance(v, dict):
                        precision = v.get('precision', 0)
                        recall = v.get('recall', 0)
                        f1 = v.get('f1-score', 0)
                        support = v.get('support', 0)
                        report.append(f"{k.ljust(max_len)} | precision: {precision:.2f} | recall: {recall:.2f} | f1-score: {f1:.2f} | support: {support}\n")
            else:
                # Assume it is the plain-string report format.
                report.append(str(class_report))
            report.append("```\n\n")

        # Link to the last epoch's confusion-matrix image, if it was saved.
        report.append("## 混淆矩阵\n")
        if "epochs" in self.progress_data and isinstance(self.progress_data["epochs"], list) and len(self.progress_data["epochs"]) > 0:
            last_epoch = self.progress_data["epochs"][-1]["epoch"]
            cm_img_file = f"confusion_matrices/confusion_matrix_epoch_{last_epoch}.png"
            if os.path.exists(os.path.join(self.log_dir, cm_img_file)):
                report.append(f"![最终混淆矩阵]({cm_img_file})\n\n")

        # Write the report; UTF-8 is required for the Chinese headings.
        with open(self.report_file, 'w', encoding='utf-8') as f:
            f.writelines(report)

        print(f"训练报告已生成: {self.report_file}")

    def _save_progress(self):
        """Persist progress_data to the JSON progress file as UTF-8."""
        with open(self.progress_file, 'w', encoding='utf-8') as f:
            json.dump(self.progress_data, f, indent=2, ensure_ascii=False, default=str)

def _train_one_epoch(model, train_loader, criterion, optimizer, device, epoch, num_epochs):
    """Run one training epoch; return (mean_loss, accuracy)."""
    model.train()
    running_loss = 0.0
    correct = 0
    total = 0

    progress_bar = tqdm(train_loader, desc=f"Epoch {epoch+1}/{num_epochs} [Train]")
    for inputs, labels in progress_bar:
        inputs, labels = inputs.to(device), labels.to(device)

        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()
        _, predicted = torch.max(outputs, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()

        # Live loss/accuracy readout on the progress bar.
        progress_bar.set_postfix({
            'loss': f"{loss.item():.4f}",
            'acc': f"{100.0 * correct / total:.2f}%"
        })

    return running_loss / len(train_loader), correct / total


def _validate(model, val_loader, criterion, device, epoch, num_epochs):
    """Evaluate on the validation set; return (mean_loss, accuracy, preds, labels)."""
    model.eval()
    val_loss = 0.0
    val_correct = 0
    val_total = 0
    all_preds = []
    all_labels = []

    progress_bar = tqdm(val_loader, desc=f"Epoch {epoch+1}/{num_epochs} [Val]")
    with torch.no_grad():
        for inputs, labels in progress_bar:
            inputs, labels = inputs.to(device), labels.to(device)
            outputs = model(inputs)
            loss = criterion(outputs, labels)

            val_loss += loss.item()
            _, predicted = torch.max(outputs, 1)
            val_total += labels.size(0)
            val_correct += (predicted == labels).sum().item()

            # Collected for the confusion matrix.
            all_preds.extend(predicted.cpu().numpy())
            all_labels.extend(labels.cpu().numpy())

            progress_bar.set_postfix({
                'loss': f"{loss.item():.4f}",
                'acc': f"{100.0 * val_correct / val_total:.2f}%"
            })

    return val_loss / len(val_loader), val_correct / val_total, all_preds, all_labels


def train(args):
    """
    Train the face-vs-flower classifier with detailed logging.

    Args:
        args: Object with training settings. Reads: log_dir, tensorboard_dir,
            sample_dir, checkpoint_dir, epochs, lr, no_cuda, plus whatever
            create_data_loaders() consumes.

    Returns:
        The trained model carrying the final-epoch weights.
    """
    # Log directly into args.log_dir (no extra sub-directory).
    logger = ClassifierTrainingLogger(args.log_dir)
    logger.log_hyperparameters(args)

    # Create output directories up front so savefig/torch.save cannot fail
    # mid-run on a missing path.
    os.makedirs(args.sample_dir, exist_ok=True)
    os.makedirs(args.checkpoint_dir, exist_ok=True)

    # Pick the compute device.
    device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
    print(f"使用设备: {device}")

    train_loader, val_loader, class_names = create_data_loaders(args)
    print(f"类别: {class_names}")

    # Dataset bookkeeping for the final report.
    train_size = len(train_loader.dataset)
    val_size = len(val_loader.dataset)
    image_size = 224  # assumes standard 224x224 inputs — TODO confirm against the data transforms
    logger.log_dataset_info(train_size, val_size, image_size, "mixed")

    model = ImageClassifier(num_classes=len(class_names)).to(device)

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=args.lr)

    # BUGFIX: the scheduler is stepped with validation *accuracy* below, so it
    # must run in 'max' mode. The previous 'min' mode treated every accuracy
    # improvement as "no improvement" and halved the LR every patience+1 epochs.
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'max', patience=1, factor=0.5)

    # TensorBoard writes to its own dedicated directory.
    writer = SummaryWriter(log_dir=args.tensorboard_dir)

    start_time = time.time()
    best_val_acc = 0.0
    # Defined up front so the post-loop summary is safe even when epochs == 0.
    train_accuracy = 0.0
    val_accuracy = 0.0

    for epoch in range(args.epochs):
        print(f"Epoch {epoch+1}/{args.epochs}")

        train_loss, train_accuracy = _train_one_epoch(
            model, train_loader, criterion, optimizer, device, epoch, args.epochs)
        val_loss, val_accuracy, all_preds, all_labels = _validate(
            model, val_loader, criterion, device, epoch, args.epochs)

        conf_matrix = confusion_matrix(all_labels, all_preds)

        # Read the LR before stepping so the logged value matches this epoch.
        current_lr = optimizer.param_groups[0]['lr']
        scheduler.step(val_accuracy)

        # TensorBoard scalars.
        writer.add_scalar('Loss/train', train_loss, epoch)
        writer.add_scalar('Loss/val', val_loss, epoch)
        writer.add_scalar('Accuracy/train', train_accuracy, epoch)
        writer.add_scalar('Accuracy/val', val_accuracy, epoch)
        writer.add_scalar('LearningRate', current_lr, epoch)

        # JSON progress log + confusion-matrix artifacts.
        logger.log_epoch(epoch+1, train_loss, train_accuracy, val_loss, val_accuracy,
                         current_lr, class_names, conf_matrix)

        # Confusion-matrix sample image with an accuracy caption.
        plt.figure(figsize=(10, 8))
        sns.heatmap(conf_matrix, annot=True, fmt='d', cmap='Blues',
                    xticklabels=class_names, yticklabels=class_names)
        plt.xlabel('Predicted Class')
        plt.ylabel('True Class')
        plt.title(f'Confusion Matrix - Epoch {epoch+1}')
        plt.figtext(0.5, 0.01,
                    f"Training Accuracy: {100*train_accuracy:.2f}% | Validation Accuracy: {100*val_accuracy:.2f}%",
                    ha="center", fontsize=12, bbox={"facecolor":"orange", "alpha":0.2, "pad":5})

        sample_path = os.path.join(args.sample_dir, f'epoch_{epoch+1}.png')
        plt.savefig(sample_path, dpi=100, bbox_inches='tight')
        plt.close()
        print(f"Epoch {epoch+1} 样本图像已保存: {sample_path}")

        print(f"Epoch {epoch+1}/{args.epochs}, "
              f"Train Loss: {train_loss:.4f}, Train Acc: {100*train_accuracy:.2f}%, "
              f"Val Loss: {val_loss:.4f}, Val Acc: {100*val_accuracy:.2f}%, "
              f"LR: {current_lr:.6f}")

        # Per-epoch checkpoint.
        epoch_model_path = os.path.join(args.checkpoint_dir, f'classifier_epoch_{epoch+1}.pth')
        torch.save(model.state_dict(), epoch_model_path)
        print(f"Epoch {epoch+1} 模型已保存: {epoch_model_path}")

        # Best-so-far checkpoint by validation accuracy.
        if val_accuracy > best_val_acc:
            best_val_acc = val_accuracy
            best_model_path = os.path.join(args.checkpoint_dir, 'classifier_best.pth')
            torch.save(model.state_dict(), best_model_path)
            print(f"最佳模型已保存: {best_model_path} "
                  f"(验证准确率: {100*val_accuracy:.2f}%)")

    total_time = time.time() - start_time

    # Final-epoch checkpoint.
    final_model_path = os.path.join(args.checkpoint_dir, 'classifier_final.pth')
    torch.save(model.state_dict(), final_model_path)
    print(f"最终模型已保存: {final_model_path}")

    # One extra validation pass to build the per-class report.
    model.eval()
    all_preds = []
    all_labels = []
    with torch.no_grad():
        for inputs, labels in val_loader:
            inputs, labels = inputs.to(device), labels.to(device)
            outputs = model(inputs)
            _, predicted = torch.max(outputs, 1)
            all_preds.extend(predicted.cpu().numpy())
            all_labels.extend(labels.cpu().numpy())

    class_report = classification_report(all_labels, all_preds, target_names=class_names, output_dict=True)

    logger.generate_report(total_time, train_accuracy, val_accuracy, class_report)

    print(f"训练完成! 总时间: {total_time/3600:.2f}小时")
    print(f"最终训练准确率: {100*train_accuracy:.2f}%")
    print(f"最终验证准确率: {100*val_accuracy:.2f}%")
    print(f"最佳验证准确率: {100*best_val_acc:.2f}%")
    print(f"训练日志保存在: {logger.log_dir}")
    print(f"模型检查点保存在: {args.checkpoint_dir}")

    writer.close()

    return model