import os
import re
import json
import csv
import matplotlib.pyplot as plt
import numpy as np
import time
import glob
from datetime import datetime

class TrainingLogger:
    """Training log handler.

    Records key data during training (hyperparameters, dataset/model info,
    batch- and epoch-level losses) and produces paper-ready charts, CSV
    exports, and a Markdown report.  All artifacts are written under
    ``log_dir``; progress is persisted to ``training_progress.json`` so an
    interrupted run can be resumed from the same directory.
    """

    def __init__(self, log_dir, resume_session=None):
        """
        Initialize the training logger.

        Args:
            log_dir: log directory, typically logs/<dataset name>; created
                if it does not exist.
            resume_session: unused; kept only for backward compatibility
                with older callers.
        """
        self.log_dir = log_dir
        os.makedirs(self.log_dir, exist_ok=True)
        
        # If a progress file already exists in log_dir, resume that session;
        # otherwise start a brand-new one.
        training_progress_path = os.path.join(self.log_dir, "training_progress.json")
        if os.path.exists(training_progress_path):
            # Resume the existing session
            print(f"继续使用现有训练记录: {self.log_dir}")
            # Load previously recorded training data
            self.training_data = self._load_training_data()
            # Ensure session_id is present (older files may lack it)
            if "session_id" not in self.training_data:
                self.training_data["session_id"] = os.path.basename(self.log_dir)
        else:
            # Create a new session with an empty data skeleton
            print(f"创建新训练记录: {self.log_dir}")
            self.training_data = {
                "session_id": os.path.basename(self.log_dir),
                "start_time": time.time(),
                "hyperparameters": {},
                "dataset_info": {},
                "model_info": {},
                "batch_losses": [],
                "epoch_losses": [],
                "end_time": None,
                "total_time": None
            }
        
        # Persist immediately so the session is recoverable from the start
        self._save_training_progress()
    
    def _load_training_data(self):
        """Load existing training data from training_progress.json.

        Returns a fresh empty skeleton if the file is missing (defensive:
        __init__ only calls this when the file exists, but the fallback keeps
        this method safe to call on its own).
        """
        training_progress_path = os.path.join(self.log_dir, "training_progress.json")
        if os.path.exists(training_progress_path):
            with open(training_progress_path, 'r') as f:
                return json.load(f)
        else:
            # Progress file missing: return a new empty structure
            return {
                "session_id": os.path.basename(self.log_dir),
                "start_time": time.time(),
                "hyperparameters": {},
                "dataset_info": {},
                "model_info": {},
                "batch_losses": [],
                "epoch_losses": [],
                "end_time": None,
                "total_time": None
            }
    
    def _save_training_progress(self):
        """Persist the in-memory training data to training_progress.json."""
        with open(os.path.join(self.log_dir, "training_progress.json"), 'w') as f:
            json.dump(self.training_data, f, indent=2)
    
    def log_hyperparameters(self, args):
        """
        Record the run's hyperparameters.

        Args:
            args: command-line namespace (anything with ``__dict__``) or a
                mapping of hyperparameter name -> value.
        """
        # Normalize args into a plain dict
        if hasattr(args, '__dict__'):
            hyperparameters = vars(args)
        else:
            hyperparameters = dict(args)
        
        self.training_data["hyperparameters"] = hyperparameters
        
        # Also write a standalone hyperparameters.json for easy inspection
        with open(os.path.join(self.log_dir, "hyperparameters.json"), 'w') as f:
            json.dump(hyperparameters, f, indent=2)
        
        self._save_training_progress()
    
    def log_dataset_info(self, train_size, val_size, img_size, data_dir):
        """
        Record dataset information.

        Args:
            train_size: number of training samples
            val_size: number of validation samples
            img_size: image size
            data_dir: dataset directory path
        """
        dataset_info = {
            "train_size": train_size,
            "val_size": val_size,
            "img_size": img_size,
            "data_dir": data_dir
        }
        
        self.training_data["dataset_info"] = dataset_info
        
        # Also write a standalone dataset_info.json
        with open(os.path.join(self.log_dir, "dataset_info.json"), 'w') as f:
            json.dump(dataset_info, f, indent=2)
        
        self._save_training_progress()
    
    def log_model_info(self, generator, discriminator):
        """
        Record model information (parameter counts).

        Args:
            generator: generator model (anything exposing ``.parameters()``
                yielding tensors with ``.numel()``, e.g. a torch.nn.Module)
            discriminator: discriminator model, same requirement
        """
        # Count trainable + non-trainable parameters of each model
        gen_params = sum(p.numel() for p in generator.parameters())
        disc_params = sum(p.numel() for p in discriminator.parameters())
        
        model_info = {
            "generator_params": gen_params,
            "discriminator_params": disc_params,
            "total_params": gen_params + disc_params
        }
        
        self.training_data["model_info"] = model_info
        
        # Also write a standalone model_info.json
        with open(os.path.join(self.log_dir, "model_info.json"), 'w') as f:
            json.dump(model_info, f, indent=2)
        
        self._save_training_progress()
    
    def log_batch(self, epoch, batch_idx, total_batches, g_loss, d_loss):
        """
        Record batch-level losses.

        Args:
            epoch: current epoch number
            batch_idx: batch index within the epoch
            total_batches: total number of batches in the epoch
            g_loss: generator loss for this batch
            d_loss: discriminator loss for this batch

        NOTE(review): batch_losses grows without bound for the whole run and
        is re-serialized to JSON every 10 batches — fine for short runs, but
        may become slow/large for very long ones.
        """
        batch_data = {
            "epoch": epoch,
            "batch": batch_idx,
            "progress": batch_idx / total_batches,
            "g_loss": g_loss,
            "d_loss": d_loss,
            "timestamp": time.time()
        }
        
        self.training_data["batch_losses"].append(batch_data)
        
        # Persist only every 10th batch to limit disk churn
        if batch_idx % 10 == 0:
            self._save_training_progress()
    
    def log_epoch(self, epoch, g_train_loss, d_train_loss, g_val_loss, d_val_loss):
        """
        Record epoch-level losses and refresh all derived artifacts
        (CSV, loss charts, progress JSON, Markdown report).

        Args:
            epoch: current epoch number
            g_train_loss: generator training loss
            d_train_loss: discriminator training loss
            g_val_loss: generator validation loss
            d_val_loss: discriminator validation loss
        """
        epoch_data = {
            "epoch": epoch,
            "g_train_loss": g_train_loss,
            "d_train_loss": d_train_loss,
            "g_val_loss": g_val_loss,
            "d_val_loss": d_val_loss,
            "timestamp": time.time()
        }
        
        # Replace an existing record for this epoch if one is present
        # (can happen when resuming a session mid-epoch)
        for i, existing_epoch in enumerate(self.training_data["epoch_losses"]):
            if existing_epoch["epoch"] == epoch:
                # Update the existing record in place
                self.training_data["epoch_losses"][i] = epoch_data
                break
        else:
            # for/else: no break occurred, so this epoch is new — append it
            self.training_data["epoch_losses"].append(epoch_data)
        
        # Export epoch losses as CSV
        self._save_epoch_losses_csv()
        
        # Redraw the epoch-level loss curves
        self._plot_loss_curves()
        
        # Redraw the batch-level loss curves
        self._plot_batch_loss_curves()
        
        # Persist progress
        self._save_training_progress()
        
        # Regenerate the Markdown training report
        self._generate_training_report()
    
    def _save_epoch_losses_csv(self):
        """Write all epoch losses to epoch_losses.csv (full rewrite each call)."""
        csv_path = os.path.join(self.log_dir, "epoch_losses.csv")
        with open(csv_path, 'w', newline='') as f:
            writer = csv.writer(f)
            writer.writerow(["epoch", "g_train_loss", "d_train_loss", "g_val_loss", "d_val_loss"])
            
            for epoch_data in self.training_data["epoch_losses"]:
                writer.writerow([
                    epoch_data["epoch"],
                    epoch_data["g_train_loss"],
                    epoch_data["d_train_loss"],
                    epoch_data["g_val_loss"],
                    epoch_data["d_val_loss"]
                ])
    
    def _plot_loss_curves(self):
        """Plot epoch-level generator/discriminator loss curves to loss_curves.png."""
        if not self.training_data["epoch_losses"]:
            return
        
        epochs = [epoch["epoch"] for epoch in self.training_data["epoch_losses"]]
        g_train_losses = [epoch["g_train_loss"] for epoch in self.training_data["epoch_losses"]]
        d_train_losses = [epoch["d_train_loss"] for epoch in self.training_data["epoch_losses"]]
        g_val_losses = [epoch["g_val_loss"] for epoch in self.training_data["epoch_losses"]]
        d_val_losses = [epoch["d_val_loss"] for epoch in self.training_data["epoch_losses"]]
        
        plt.figure(figsize=(12, 10))
        
        # Top subplot: generator losses
        plt.subplot(2, 1, 1)
        plt.plot(epochs, g_train_losses, 'b-o', label='Generator Training Loss', linewidth=2)
        plt.plot(epochs, g_val_losses, 'b--o', label='Generator Validation Loss', linewidth=2)
        
        # Annotate each data point with its value
        for i, (g_train, g_val) in enumerate(zip(g_train_losses, g_val_losses)):
            plt.text(epochs[i], g_train, f'{g_train:.1f}', ha='center', va='bottom')
            plt.text(epochs[i], g_val, f'{g_val:.1f}', ha='center', va='top')
        
        plt.xlabel('Epoch')
        plt.ylabel('Generator Loss')
        plt.title('Generator Training and Validation Losses')
        plt.legend()
        plt.grid(True, alpha=0.3)
        
        # Bottom subplot: discriminator losses
        plt.subplot(2, 1, 2)
        plt.plot(epochs, d_train_losses, 'r-o', label='Discriminator Training Loss', linewidth=2)
        plt.plot(epochs, d_val_losses, 'r--o', label='Discriminator Validation Loss', linewidth=2)
        
        # Annotate each data point with its value
        for i, (d_train, d_val) in enumerate(zip(d_train_losses, d_val_losses)):
            plt.text(epochs[i], d_train, f'{d_train:.2f}', ha='center', va='bottom')
            plt.text(epochs[i], d_val, f'{d_val:.2f}', ha='center', va='top')
        
        # Pick a Y-axis range that keeps the discriminator curve readable
        all_d_losses = d_train_losses + d_val_losses
        if all_d_losses:
            # Pad min by 20% down (floored at 0) and max by 20% up
            d_min = max(0, min(all_d_losses) * 0.8)  # lower bound never below 0
            d_max = max(all_d_losses) * 1.2  # upper bound padded by 20%
            
            # Guarantee the axis spans at least up to 1.0
            if d_max < 1.0:
                d_max = 1.0
            
            # Widen a too-narrow range to at least 0.5
            if d_max - d_min < 0.5:
                d_mean = (d_max + d_min) / 2
                d_min = max(0, d_mean - 0.25)
                d_max = d_mean + 0.25
                
            plt.ylim(d_min, d_max)
        
        # Shade the band the author treats as the "ideal" discriminator range
        plt.axhspan(0.5, 0.7, alpha=0.15, color='green', label='Ideal Range')
        plt.axhline(y=0.5, color='green', linestyle='--', alpha=0.5)
        plt.axhline(y=0.7, color='green', linestyle='--', alpha=0.5)
        
        plt.xlabel('Epoch')
        plt.ylabel('Discriminator Loss')
        plt.title('Discriminator Training and Validation Losses')
        plt.legend()
        plt.grid(True, alpha=0.3)
        
        plt.suptitle('Training and Validation Losses per Epoch', fontsize=16)
        plt.tight_layout()
        plt.subplots_adjust(top=0.92)  # leave room for the suptitle
        
        # Save and close the figure to free matplotlib resources
        plt.savefig(os.path.join(self.log_dir, "loss_curves.png"), dpi=150)
        plt.close()
    
    def _plot_batch_loss_curves(self):
        """Plot batch-level loss curves (one color per epoch, plus a moving
        average and epoch-boundary markers) to batch_loss_curves.png."""
        if not self.training_data["batch_losses"]:
            return
        
        # Raw per-batch records
        batch_data = self.training_data["batch_losses"]
        
        # Group records by epoch number
        epochs = {}
        for b in batch_data:
            epoch = b["epoch"]
            if epoch not in epochs:
                epochs[epoch] = []
            epochs[epoch].append(b)
        
        plt.figure(figsize=(14, 10))
        
        # One distinct viridis color per epoch
        colors = plt.cm.viridis(np.linspace(0, 1, len(epochs)))
        
        # Top subplot: generator batch losses
        plt.subplot(2, 1, 1)
        
        all_g_losses = []
        all_global_steps = []
        # Draw each epoch's points in its own color
        for i, (epoch, epoch_data) in enumerate(sorted(epochs.items())):
            batches = [b["batch"] for b in epoch_data]
            g_losses = [b["g_loss"] for b in epoch_data]
            
            # Map batch index to a global step number.
            # NOTE(review): max_batch is the largest batch index seen in any
            # epoch <= this one; if epochs have different batch counts the
            # resulting step positions can overlap between epochs — confirm.
            max_batch = max([max(epochs[e], key=lambda x: x["batch"])["batch"] for e in epochs if e <= epoch], default=0)
            global_steps = [(epoch - 1) * max_batch + b for b in batches]
            
            # Raw data points for this epoch
            plt.plot(global_steps, g_losses, 'o-', color=colors[i], alpha=0.6, 
                     label=f'Epoch {epoch}', markersize=4)
            
            # Accumulate for the moving-average overlay below
            all_g_losses.extend(g_losses)
            all_global_steps.extend(global_steps)
        
        # Overlay a moving average when there are enough points
        if len(all_g_losses) > 5:
            # Sort points by global step before smoothing
            sorted_data = sorted(zip(all_global_steps, all_g_losses))
            sorted_steps = [x[0] for x in sorted_data]
            sorted_losses = [x[1] for x in sorted_data]
            
            # Simple box-filter moving average via convolution
            window_size = min(5, len(sorted_losses) // 3)
            if window_size > 1:
                weights = np.ones(window_size) / window_size
                smoothed_losses = np.convolve(sorted_losses, weights, mode='valid')
                smoothed_steps = sorted_steps[window_size-1:]
                plt.plot(smoothed_steps, smoothed_losses, 'r-', linewidth=2, 
                         label=f'Moving Average (window={window_size})')
        
        # Vertical dashed lines separating epochs
        epoch_boundaries = {}  # NOTE(review): never written to / read — dead variable
        for epoch in sorted(epochs.keys())[1:]:  # start from the second epoch
            if epoch-1 in epochs and epochs[epoch-1]:
                max_batch_prev = max(epochs[epoch-1], key=lambda x: x["batch"])["batch"]
                min_batch_curr = min(epochs[epoch], key=lambda x: x["batch"])["batch"]
                max_batch = max([max(epochs[e], key=lambda x: x["batch"])["batch"] for e in epochs if e <= epoch], default=0)
                boundary = (epoch - 1) * max_batch  # epoch boundary in global steps
                plt.axvline(x=boundary, color='gray', linestyle='--', alpha=0.5)
                # Label the boundary with its epoch number
                plt.text(boundary, plt.ylim()[1] * 0.9, f'Epoch {epoch}', 
                         horizontalalignment='center', verticalalignment='top',
                         bbox=dict(facecolor='white', alpha=0.8))
        
        plt.ylabel('Generator Loss')
        plt.title('Batch-level Generator Loss')
        plt.grid(True, alpha=0.3)
        plt.legend(loc='upper right')
        
        # Bottom subplot: discriminator batch losses
        plt.subplot(2, 1, 2)
        
        all_d_losses = []
        # Draw each epoch's points in its own color
        for i, (epoch, epoch_data) in enumerate(sorted(epochs.items())):
            batches = [b["batch"] for b in epoch_data]
            d_losses = [b["d_loss"] for b in epoch_data]
            
            # Same global-step mapping as the generator subplot
            max_batch = max([max(epochs[e], key=lambda x: x["batch"])["batch"] for e in epochs if e <= epoch], default=0)
            global_steps = [(epoch - 1) * max_batch + b for b in batches]
            
            # Raw data points for this epoch
            plt.plot(global_steps, d_losses, 'o-', color=colors[i], alpha=0.6, 
                     label=f'Epoch {epoch}', markersize=4)
            
            # Accumulate for the moving-average overlay below
            all_d_losses.extend(d_losses)
            
        # Overlay a moving average when there are enough points.
        # NOTE(review): this reuses all_global_steps collected in the
        # generator subplot; it lines up only because both loops iterate the
        # same epochs/batches in the same order — confirm intentional.
        if len(all_d_losses) > 5:
            # Sort points by global step before smoothing
            sorted_data = sorted(zip(all_global_steps, all_d_losses))
            sorted_steps = [x[0] for x in sorted_data]
            sorted_losses = [x[1] for x in sorted_data]
            
            # Simple box-filter moving average via convolution
            window_size = min(5, len(sorted_losses) // 3)
            if window_size > 1:
                weights = np.ones(window_size) / window_size
                smoothed_losses = np.convolve(sorted_losses, weights, mode='valid')
                smoothed_steps = sorted_steps[window_size-1:]
                plt.plot(smoothed_steps, smoothed_losses, 'r-', linewidth=2, 
                         label=f'Moving Average (window={window_size})')
        
        # Vertical dashed lines separating epochs
        for epoch in sorted(epochs.keys())[1:]:  # start from the second epoch
            if epoch-1 in epochs and epochs[epoch-1]:
                max_batch = max([max(epochs[e], key=lambda x: x["batch"])["batch"] for e in epochs if e <= epoch], default=0)
                boundary = (epoch - 1) * max_batch  # epoch boundary in global steps
                plt.axvline(x=boundary, color='gray', linestyle='--', alpha=0.5)
                # Label the boundary with its epoch number
                plt.text(boundary, plt.ylim()[1] * 0.9, f'Epoch {epoch}', 
                         horizontalalignment='center', verticalalignment='top',
                         bbox=dict(facecolor='white', alpha=0.8))
        
        # Fix the Y-axis range so discriminator fluctuations stay visible
        if all_d_losses:
            # Pad min by 20% down (floored at 0) and max by 20% up
            d_min = max(0, min(all_d_losses) * 0.8)  # lower bound never below 0
            d_max = max(all_d_losses) * 1.2  # upper bound padded by 20%
            
            # Guarantee the axis spans at least up to 1.0
            if d_max < 1.0:
                d_max = 1.0
            
            # Widen a too-narrow range to at least 0.5
            if d_max - d_min < 0.5:
                d_mean = (d_max + d_min) / 2
                d_min = max(0, d_mean - 0.25)
                d_max = d_mean + 0.25
                
            plt.ylim(d_min, d_max)
            
            # Shade the "ideal" discriminator range as reference
            plt.axhspan(0.5, 0.7, alpha=0.15, color='green', label='Ideal Range')
            plt.axhline(y=0.5, color='green', linestyle='--', alpha=0.5)
            plt.axhline(y=0.7, color='green', linestyle='--', alpha=0.5)
        
        plt.xlabel('Steps')
        plt.ylabel('Discriminator Loss')
        plt.title('Batch-level Discriminator Loss')
        plt.grid(True, alpha=0.3)
        plt.legend(loc='upper right')
        
        # Overall figure title
        plt.suptitle('Training Loss per Batch', fontsize=16)
        
        plt.tight_layout()
        plt.subplots_adjust(top=0.92)  # leave room for the suptitle
        plt.savefig(os.path.join(self.log_dir, "batch_loss_curves.png"), dpi=150)
        plt.close()
    
    def log_training_complete(self):
        """Record end-of-training info: timing, final/best losses, summary JSON,
        final progress snapshot, and a refreshed Markdown report."""
        # Record end time and total wall-clock duration
        self.training_data["end_time"] = time.time()
        self.training_data["total_time"] = self.training_data["end_time"] - self.training_data["start_time"]
        
        # Build the training summary (final-epoch losses, or None if no epochs)
        training_summary = {
            "session_id": self.training_data["session_id"],
            "total_epochs": len(self.training_data["epoch_losses"]),
            "training_time_seconds": self.training_data["total_time"],
            "training_time_hours": self.training_data["total_time"] / 3600,
            "final_g_train_loss": self.training_data["epoch_losses"][-1]["g_train_loss"] if self.training_data["epoch_losses"] else None,
            "final_d_train_loss": self.training_data["epoch_losses"][-1]["d_train_loss"] if self.training_data["epoch_losses"] else None,
            "final_g_val_loss": self.training_data["epoch_losses"][-1]["g_val_loss"] if self.training_data["epoch_losses"] else None,
            "final_d_val_loss": self.training_data["epoch_losses"][-1]["d_val_loss"] if self.training_data["epoch_losses"] else None
        }
        
        # Locate the epoch with the best (lowest) generator validation loss
        if self.training_data["epoch_losses"]:
            best_g_val_epoch = min(self.training_data["epoch_losses"], key=lambda x: x["g_val_loss"])
            training_summary["best_g_val_loss"] = best_g_val_epoch["g_val_loss"]
            training_summary["best_g_val_epoch"] = best_g_val_epoch["epoch"]
        
        # Write the summary JSON
        with open(os.path.join(self.log_dir, "training_summary.json"), 'w') as f:
            json.dump(training_summary, f, indent=2)
        
        # Persist the final progress snapshot
        self._save_training_progress()
        
        # Regenerate the Markdown training report
        self._generate_training_report()
    
    def _generate_training_report(self):
        """Generate the Markdown training report (training_report.md).

        No-op until hyperparameters have been logged. The report's headings
        and bullet text are bilingual (Chinese/English) by design.
        """
        if not self.training_data["hyperparameters"]:
            return
        
        report_path = os.path.join(self.log_dir, "training_report.md")
        
        with open(report_path, 'w') as f:
            f.write("# 图像着色训练报告 / Image Colorization Training Report\n\n")
            
            # Training summary section
            f.write("## 训练摘要 / Training Summary\n")
            f.write(f"- 会话ID / Session ID: {self.training_data['session_id']}\n")
            total_epochs = len(self.training_data["epoch_losses"])
            f.write(f"- 训练总轮次 / Total Epochs: {total_epochs}\n")
            
            if self.training_data["end_time"]:
                total_time = self.training_data["end_time"] - self.training_data["start_time"]
                f.write(f"- 训练时间 / Training Time: {total_time/3600:.2f} 小时/hours ({total_time:.2f} 秒/seconds)\n")
            f.write("\n")
            
            # Model information section
            if self.training_data["model_info"]:
                f.write("## 模型信息 / Model Information\n")
                f.write(f"- 生成器参数量 / Generator Parameters: {self.training_data['model_info']['generator_params']}\n")
                f.write(f"- 判别器参数量 / Discriminator Parameters: {self.training_data['model_info']['discriminator_params']}\n")
                f.write(f"- 总参数量 / Total Parameters: {self.training_data['model_info']['total_params']}\n")
                f.write("\n")
            
            # Dataset information section
            if self.training_data["dataset_info"]:
                f.write("## 数据集信息 / Dataset Information\n")
                f.write(f"- 训练集大小 / Training Set Size: {self.training_data['dataset_info']['train_size']}\n")
                f.write(f"- 验证集大小 / Validation Set Size: {self.training_data['dataset_info']['val_size']}\n")
                f.write(f"- 图像大小 / Image Size: {self.training_data['dataset_info']['img_size']}\n")
                f.write(f"- 数据集目录 / Dataset Directory: {self.training_data['dataset_info']['data_dir']}\n")
                f.write("\n")
            
            # Hyperparameters section
            f.write("## 训练超参数 / Hyperparameters\n")
            for key, value in self.training_data["hyperparameters"].items():
                f.write(f"- {key}: {value}\n")
            f.write("\n")
            
            # Training results section
            if self.training_data["epoch_losses"]:
                f.write("## 训练结果 / Results\n")
                latest_epoch = self.training_data["epoch_losses"][-1]
                f.write(f"- 最终生成器训练损失 / Final Generator Training Loss: {latest_epoch['g_train_loss']:.4f}\n")
                f.write(f"- 最终判别器训练损失 / Final Discriminator Training Loss: {latest_epoch['d_train_loss']:.4f}\n")
                f.write(f"- 最终生成器验证损失 / Final Generator Validation Loss: {latest_epoch['g_val_loss']:.4f}\n")
                f.write(f"- 最终判别器验证损失 / Final Discriminator Validation Loss: {latest_epoch['d_val_loss']:.4f}\n")
                
                # Best (lowest) generator validation loss across all epochs
                best_g_val_epoch = min(self.training_data["epoch_losses"], key=lambda x: x["g_val_loss"])
                f.write(f"- 最佳生成器验证损失 / Best Generator Validation Loss: {best_g_val_epoch['g_val_loss']:.4f} (轮次/Epoch {best_g_val_epoch['epoch']})\n")
                f.write("\n")
            
            # Charts section (points at images saved by the plot methods)
            f.write("## 图表 / Charts\n")
            f.write("- 损失曲线 / Loss Curves: (见/see loss_curves.png)\n")
            f.write("- 批次损失曲线 / Batch Loss Curves: (见/see batch_loss_curves.png)\n")
            f.write("\n")
            
            # Convergence analysis section (needs at least 2 epochs)
            if len(self.training_data["epoch_losses"]) > 1:
                f.write("## 训练收敛性分析 / Convergence Analysis\n")
                # Improvement rate over the last few (up to 5) epochs
                last_epochs = min(5, len(self.training_data["epoch_losses"]))
                if last_epochs > 1:
                    recent_epochs = self.training_data["epoch_losses"][-last_epochs:]
                    first_loss = recent_epochs[0]["g_val_loss"]
                    last_loss = recent_epochs[-1]["g_val_loss"]
                    if first_loss > 0:  # guard against division by zero
                        improvement_rate = (first_loss - last_loss) / first_loss * 100
                        f.write(f"- 最近{last_epochs}轮验证损失改进率 / Recent {last_epochs} epochs improvement rate: {improvement_rate:.2f}%\n")
                
                # Convergence status from relative spread of last 3 val losses
                if len(self.training_data["epoch_losses"]) >= 3:
                    recent_losses = [e["g_val_loss"] for e in self.training_data["epoch_losses"][-3:]]
                    loss_diff = max(recent_losses) - min(recent_losses)
                    avg_loss = sum(recent_losses) / len(recent_losses)
                    
                    if avg_loss > 0:  # guard against division by zero
                        relative_change = loss_diff / avg_loss
                        
                        if relative_change < 0.01:
                            f.write("- 收敛状态 / Convergence Status: 已基本收敛 / Converged\n")
                        elif relative_change < 0.05:
                            f.write("- 收敛状态 / Convergence Status: 接近收敛 / Near Convergence\n")
                        else:
                            f.write("- 收敛状态 / Convergence Status: 仍在训练中 / Still Training\n")
                f.write("\n")
            
            # Recommendations section
            f.write("## 建议 / Recommendations\n")
            
            # Heuristic advice based on the latest losses
            if self.training_data["epoch_losses"]:
                latest_epoch = self.training_data["epoch_losses"][-1]
                if len(self.training_data["epoch_losses"]) > 1:
                    # Overfitting heuristic: train loss far below val loss
                    if latest_epoch["g_train_loss"] < 0.5 * latest_epoch["g_val_loss"]:
                        f.write("- 训练/验证损失差距较大，可能存在过拟合风险。考虑使用更多数据或添加正则化。\n")
                        f.write("- Large gap between training and validation loss suggests potential overfitting. Consider using more data or adding regularization.\n")
                    
                    # Per-epoch improvement deltas over the last up-to-3 epochs
                    recent_epochs = self.training_data["epoch_losses"][-min(3, len(self.training_data["epoch_losses"])):]
                    recent_improvements = [recent_epochs[i-1]["g_val_loss"] - recent_epochs[i]["g_val_loss"] for i in range(1, len(recent_epochs))]
                    
                    if all(imp > 0.1 for imp in recent_improvements):
                        f.write("- 验证损失仍在显著下降，建议继续训练更多轮次。\n")
                        f.write("- Validation loss is still decreasing significantly, suggest continuing training for more epochs.\n")
                    elif all(imp < 0.01 for imp in recent_improvements) and len(recent_epochs) >= 3:
                        f.write("- 验证损失改善很小，模型可能已经收敛。\n")
                        f.write("- Validation loss improvement is minimal, model may have converged.\n")
                
                # General advice from absolute loss magnitudes
                if latest_epoch["g_val_loss"] > 30:
                    f.write("- 损失值仍然较高，建议尝试调整学习率或网络结构。\n")
                    f.write("- Loss values are still high, consider adjusting learning rate or network architecture.\n")
                elif latest_epoch["d_val_loss"] < 0.2:
                    f.write("- 判别器损失较低，可能需要平衡生成器和判别器的训练。\n")
                    f.write("- Discriminator loss is low, might need to balance generator and discriminator training.\n")
            else:
                f.write("- 尚无足够数据提供建议。\n")
                f.write("- Not enough data to provide recommendations yet.\n")
            f.write("\n")

    @classmethod
    def find_latest_session(cls, log_dir):
        """Return the basename of log_dir if it exists, else None.

        Simplified remnant of an older session-discovery API; kept for
        backward compatibility.
        """
        if os.path.exists(log_dir):
            return os.path.basename(log_dir)
        return None

    @classmethod
    def find_latest_checkpoint(cls, output_dir):
        """Find the most recent generator checkpoint under output_dir/checkpoints.

        Preference order: generator_final.pth if present, otherwise the
        generator_epoch_<N>.pth with the largest N. Returns the path or None.
        """
        checkpoint_dir = os.path.join(output_dir, "checkpoints")
        if not os.path.exists(checkpoint_dir):
            return None
            
        # A "final" checkpoint always wins
        final_checkpoint = os.path.join(checkpoint_dir, "generator_final.pth")
        if os.path.exists(final_checkpoint):
            return final_checkpoint
            
        # Otherwise collect all per-epoch checkpoints
        checkpoints = glob.glob(os.path.join(checkpoint_dir, "generator_epoch_*.pth"))
        if not checkpoints:
            return None
            
        # Extract the epoch number from each filename
        epoch_pattern = re.compile(r'generator_epoch_(\d+)\.pth')
        checkpoint_epochs = []
        for checkpoint in checkpoints:
            match = epoch_pattern.search(checkpoint)
            if match:
                epoch = int(match.group(1))
                checkpoint_epochs.append((epoch, checkpoint))
                
        if not checkpoint_epochs:
            return None
            
        # Pick the checkpoint with the highest epoch number
        latest_checkpoint = max(checkpoint_epochs, key=lambda x: x[0])
        return latest_checkpoint[1]


# Example usage: parse an existing plain-text training log file and rebuild
# the JSON/CSV/chart artifacts and Markdown report from it.
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(description='解析训练日志文件并生成报告')
    parser.add_argument('--log_file', type=str, required=True, help='训练日志文件路径')
    # New optional flag (backward compatible): where to write the artifacts.
    parser.add_argument('--log_dir', type=str, default=None,
                        help='输出目录，默认为日志文件所在目录下的 parsed_logs')
    args = parser.parse_args()

    # BUG FIX: TrainingLogger requires a log_dir argument; the original code
    # called TrainingLogger() with no arguments and crashed immediately.
    log_dir = args.log_dir or os.path.join(
        os.path.dirname(os.path.abspath(args.log_file)), "parsed_logs")
    logger = TrainingLogger(log_dir)

    # Read the whole log once
    with open(args.log_file, 'r') as f:
        log_content = f.read()

    # Regexes for the expected log format; adjust to the actual log layout.
    epoch_pattern = r"Epoch (\d+)/\d+"
    train_loss_pattern = r"Training - Generator loss: ([\d\.]+), Discriminator loss: ([\d\.]+)"
    val_loss_pattern = r"Validation - Generator loss: ([\d\.]+), Discriminator loss: ([\d\.]+)"
    batch_pattern = r"Batch \[(\d+)/\d+\] - D loss: ([\d\.]+), G loss: ([\d\.]+)"

    # Dataset sizes, if present in the log (image size / data dir unknown here)
    dataset_match = re.search(r"Training set size: (\d+), Validation set size: (\d+)", log_content)
    if dataset_match:
        train_size = int(dataset_match.group(1))
        val_size = int(dataset_match.group(2))
        logger.log_dataset_info(train_size, val_size, 256, "Unknown")

    # Single streaming pass over the log.  We track the current epoch, hold
    # the most recent training losses, and emit an epoch record as soon as
    # the matching validation line arrives.
    # BUG FIX: the original re-scanned the entire log for every training
    # line (O(n^2)) and paired validation lines via log_content.index(),
    # which finds only the FIRST occurrence of a line's text and mispairs
    # epochs whenever identical lines repeat.
    current_epoch = None
    pending_train = None  # (g_train_loss, d_train_loss) awaiting validation
    for line in log_content.splitlines():
        epoch_match = re.search(epoch_pattern, line)
        if epoch_match:
            current_epoch = int(epoch_match.group(1))
            pending_train = None  # a new epoch invalidates stale train losses

        if current_epoch is None:
            continue

        batch_match = re.search(batch_pattern, line)
        if batch_match:
            batch_idx = int(batch_match.group(1))
            d_loss = float(batch_match.group(2))
            g_loss = float(batch_match.group(3))
            # NOTE(review): total_batches is not recoverable from this
            # pattern's captured groups, so 100 is used as a placeholder.
            logger.log_batch(current_epoch, batch_idx, 100, g_loss, d_loss)
            continue

        train_loss_match = re.search(train_loss_pattern, line)
        if train_loss_match:
            pending_train = (float(train_loss_match.group(1)),
                             float(train_loss_match.group(2)))
            continue

        val_loss_match = re.search(val_loss_pattern, line)
        if val_loss_match and pending_train is not None:
            g_train_loss, d_train_loss = pending_train
            g_val_loss = float(val_loss_match.group(1))
            d_val_loss = float(val_loss_match.group(2))
            logger.log_epoch(current_epoch, g_train_loss, d_train_loss,
                             g_val_loss, d_val_loss)
            pending_train = None

    logger.log_training_complete()
    print(f"报告已生成: {logger.log_dir}/training_report.md")