import json
import os
import time

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from matplotlib.font_manager import FontProperties

# Configure a CJK-capable font so Chinese axis labels and titles render correctly.
font_path = '/Users/liuyuzhuo/Library/Fonts/SimHei.ttf'  # NOTE(review): machine-specific path — confirm it exists on the target host
font_prop = FontProperties(fname=font_path)
plt.rcParams['axes.unicode_minus'] = False  # use an ASCII minus sign so negatives display with CJK fonts

def load_training_data(log_dir):
    """Load the training-progress record for one training run.

    Args:
        log_dir: Directory expected to contain ``training_progress.json``.

    Returns:
        The parsed JSON object, or ``None`` (after printing an error
        message) when the file does not exist.
    """
    training_progress_path = os.path.join(log_dir, "training_progress.json")
    if not os.path.exists(training_progress_path):
        print(f"错误: {training_progress_path} 不存在")
        return None
    # Explicit UTF-8: the log may contain Chinese text and the platform
    # default encoding is not guaranteed to be UTF-8.
    with open(training_progress_path, 'r', encoding='utf-8') as f:
        return json.load(f)

def plot_epoch_loss_curves(log_dir, dataset_name):
    """Plot per-epoch train/validation loss curves for generator and discriminator.

    Reads ``training_progress.json`` from *log_dir* (via ``load_training_data``),
    draws a two-panel figure (generator on top, discriminator below) and saves
    it as ``epoch_loss_curves_zh.png`` inside *log_dir*.

    Args:
        log_dir: Directory containing ``training_progress.json``.
        dataset_name: Human-readable dataset name used in the figure title.
    """
    training_data = load_training_data(log_dir)
    if not training_data or "epoch_losses" not in training_data or not training_data["epoch_losses"]:
        print(f"错误: 在 {log_dir} 中未找到有效的周期损失数据")
        return

    # Extract per-epoch loss series.
    epoch_records = training_data["epoch_losses"]
    epochs = [rec["epoch"] for rec in epoch_records]
    g_train_losses = [rec["g_train_loss"] for rec in epoch_records]
    d_train_losses = [rec["d_train_loss"] for rec in epoch_records]
    g_val_losses = [rec["g_val_loss"] for rec in epoch_records]
    d_val_losses = [rec["d_val_loss"] for rec in epoch_records]

    plt.figure(figsize=(12, 10))

    # --- Generator losses (top panel) ---
    plt.subplot(2, 1, 1)
    plt.plot(epochs, g_train_losses, 'b-o', label='生成器训练损失', linewidth=2)
    plt.plot(epochs, g_val_losses, 'b--o', label='生成器验证损失', linewidth=2)

    # Annotate every data point with its value.
    for i, (g_train, g_val) in enumerate(zip(g_train_losses, g_val_losses)):
        plt.text(epochs[i], g_train, f'{g_train:.1f}', ha='center', va='bottom', fontproperties=font_prop)
        plt.text(epochs[i], g_val, f'{g_val:.1f}', ha='center', va='top', fontproperties=font_prop)

    # Relative improvement of the validation loss from first to last epoch.
    # Guarded against a zero initial loss, consistent with the same
    # computation in generate_training_metrics.
    initial_g_val = g_val_losses[0]
    final_g_val = g_val_losses[-1]
    improvement_rate = (initial_g_val - final_g_val) / initial_g_val * 100 if initial_g_val != 0 else 0

    plt.annotate(
        f'改进率: {improvement_rate:.2f}%',
        xy=(epochs[-1], final_g_val),
        xytext=(epochs[-1] - 2, final_g_val + 5),
        arrowprops=dict(facecolor='black', shrink=0.05, width=1.5, headwidth=8),
        fontproperties=font_prop
    )

    plt.xlabel('训练周期', fontproperties=font_prop, fontsize=14)
    plt.ylabel('生成器损失', fontproperties=font_prop, fontsize=14)
    plt.title('生成器训练和验证损失', fontproperties=font_prop, fontsize=16)
    plt.legend(prop=font_prop)
    plt.grid(True, alpha=0.3)

    # --- Discriminator losses (bottom panel) ---
    plt.subplot(2, 1, 2)
    plt.plot(epochs, d_train_losses, 'r-o', label='判别器训练损失', linewidth=2)
    plt.plot(epochs, d_val_losses, 'r--o', label='判别器验证损失', linewidth=2)

    # Annotate every data point with its value.
    for i, (d_train, d_val) in enumerate(zip(d_train_losses, d_val_losses)):
        plt.text(epochs[i], d_train, f'{d_train:.2f}', ha='center', va='bottom', fontproperties=font_prop)
        plt.text(epochs[i], d_val, f'{d_val:.2f}', ha='center', va='top', fontproperties=font_prop)

    # Choose a y-range that keeps the discriminator's (usually small)
    # fluctuations visible.
    all_d_losses = d_train_losses + d_val_losses
    if all_d_losses:
        d_min = max(0, min(all_d_losses) * 0.8)  # floor at 0
        d_max = max(all_d_losses) * 1.2  # 20% headroom

        # Show at least the 0-1 band.
        if d_max < 1.0:
            d_max = 1.0

        # Widen overly narrow ranges to at least 0.5.
        if d_max - d_min < 0.5:
            d_mean = (d_max + d_min) / 2
            d_min = max(0, d_mean - 0.25)
            d_max = d_mean + 0.25

        plt.ylim(d_min, d_max)

    # Mark the 0.5-0.7 band — presumably the desired discriminator-loss
    # equilibrium range for this GAN; verify with the training code.
    plt.axhspan(0.5, 0.7, alpha=0.15, color='green', label='理想区间')
    plt.axhline(y=0.5, color='green', linestyle='--', alpha=0.5)
    plt.axhline(y=0.7, color='green', linestyle='--', alpha=0.5)

    plt.xlabel('训练周期', fontproperties=font_prop, fontsize=14)
    plt.ylabel('判别器损失', fontproperties=font_prop, fontsize=14)
    plt.title('判别器训练和验证损失', fontproperties=font_prop, fontsize=16)
    plt.legend(prop=font_prop)
    plt.grid(True, alpha=0.3)

    plt.suptitle(f'{dataset_name}模型 - 周期损失曲线', fontproperties=font_prop, fontsize=18)
    plt.tight_layout()
    plt.subplots_adjust(top=0.92)  # leave room for the suptitle

    # Save the figure next to the log data.
    save_path = os.path.join(log_dir, "epoch_loss_curves_zh.png")
    plt.savefig(save_path, dpi=150)
    plt.close()

    print(f"周期损失曲线已保存至 {save_path}")

def _max_batch_through(epochs_by_num, epoch):
    """Largest batch index recorded in any epoch <= *epoch* (0 if none)."""
    return max(
        [max(epochs_by_num[e], key=lambda x: x["batch"])["batch"]
         for e in epochs_by_num if e <= epoch],
        default=0,
    )


def _plot_smoothed(all_steps, all_losses):
    """Overlay a moving-average curve on the current axes.

    Skipped when there are 5 or fewer points; the window size scales with
    the amount of data (capped at 50).
    """
    if len(all_losses) <= 5:
        return
    # Sort points by global step before smoothing.
    sorted_data = sorted(zip(all_steps, all_losses))
    sorted_steps = [x[0] for x in sorted_data]
    sorted_losses = [x[1] for x in sorted_data]
    window_size = min(50, len(sorted_losses) // 10)
    if window_size > 1:
        weights = np.ones(window_size) / window_size
        smoothed_losses = np.convolve(sorted_losses, weights, mode='valid')
        smoothed_steps = sorted_steps[window_size - 1:]
        plt.plot(smoothed_steps, smoothed_losses, 'r-', linewidth=2,
                 label=f'滑动平均 (窗口={window_size})')


def _draw_epoch_boundaries(epochs_by_num):
    """Draw dashed vertical separators (with epoch labels) between epochs."""
    for epoch in sorted(epochs_by_num.keys())[1:]:  # from the second epoch on
        if epoch - 1 in epochs_by_num and epochs_by_num[epoch - 1]:
            max_batch = _max_batch_through(epochs_by_num, epoch)
            boundary = (epoch - 1) * max_batch  # epoch boundary step
            plt.axvline(x=boundary, color='gray', linestyle='--', alpha=0.5)
            plt.text(boundary, plt.ylim()[1] * 0.9, f'周期 {epoch}',
                     horizontalalignment='center', verticalalignment='top',
                     bbox=dict(facecolor='white', alpha=0.8),
                     fontproperties=font_prop)


def plot_batch_loss_curves(log_dir, dataset_name):
    """Plot batch-level generator/discriminator loss curves, colored by epoch.

    Reads ``training_progress.json`` from *log_dir*, plots every recorded
    batch loss on a global-step axis (one color per epoch, with a moving
    average and dashed epoch separators) and saves the figure as
    ``batch_loss_curves_zh.png`` inside *log_dir*.

    Args:
        log_dir: Directory containing ``training_progress.json``.
        dataset_name: Human-readable dataset name used in the figure title.
    """
    training_data = load_training_data(log_dir)
    if not training_data or "batch_losses" not in training_data or not training_data["batch_losses"]:
        print(f"错误: 在 {log_dir} 中未找到有效的批次损失数据")
        return

    batch_data = training_data["batch_losses"]

    # Group batch records by epoch number.
    epochs = {}
    for b in batch_data:
        epochs.setdefault(b["epoch"], []).append(b)

    plt.figure(figsize=(14, 10))

    # One distinct color per epoch.
    colors = plt.cm.viridis(np.linspace(0, 1, len(epochs)))

    # --- Generator losses (top panel) ---
    plt.subplot(2, 1, 1)

    all_g_losses = []
    all_g_steps = []
    for i, (epoch, epoch_data) in enumerate(sorted(epochs.items())):
        batches = [b["batch"] for b in epoch_data]
        g_losses = [b["g_loss"] for b in epoch_data]

        # NOTE(review): max_batch depends on the current epoch, so the step
        # scale can shift between epochs when batch counts differ — kept
        # as-is to preserve the existing x-axis layout.
        max_batch = _max_batch_through(epochs, epoch)
        global_steps = [(epoch - 1) * max_batch + b for b in batches]

        plt.plot(global_steps, g_losses, 'o-', color=colors[i], alpha=0.6,
                 label=f'周期 {epoch}', markersize=4)

        all_g_losses.extend(g_losses)
        all_g_steps.extend(global_steps)

    _plot_smoothed(all_g_steps, all_g_losses)
    _draw_epoch_boundaries(epochs)

    plt.ylabel('生成器损失', fontproperties=font_prop, fontsize=14)
    plt.title('批次级别生成器损失', fontproperties=font_prop, fontsize=16)
    plt.grid(True, alpha=0.3)
    plt.legend(prop=font_prop, loc='upper right')

    # --- Discriminator losses (bottom panel) ---
    plt.subplot(2, 1, 2)

    all_d_losses = []
    # Bug fix: collect the discriminator's own step list instead of reusing
    # the generator loop's list — reusing it silently mispairs steps and
    # losses if the two loops ever diverge.
    all_d_steps = []
    for i, (epoch, epoch_data) in enumerate(sorted(epochs.items())):
        batches = [b["batch"] for b in epoch_data]
        d_losses = [b["d_loss"] for b in epoch_data]

        max_batch = _max_batch_through(epochs, epoch)
        global_steps = [(epoch - 1) * max_batch + b for b in batches]

        plt.plot(global_steps, d_losses, 'o-', color=colors[i], alpha=0.6,
                 label=f'周期 {epoch}', markersize=4)

        all_d_losses.extend(d_losses)
        all_d_steps.extend(global_steps)

    _plot_smoothed(all_d_steps, all_d_losses)
    _draw_epoch_boundaries(epochs)

    # Fix the y-range so the discriminator's small fluctuations stay visible.
    if all_d_losses:
        d_min = max(0, min(all_d_losses) * 0.8)  # floor at 0
        d_max = max(all_d_losses) * 1.2  # 20% headroom

        # Show at least the 0-1 band.
        if d_max < 1.0:
            d_max = 1.0

        # Widen overly narrow ranges to at least 0.5.
        if d_max - d_min < 0.5:
            d_mean = (d_max + d_min) / 2
            d_min = max(0, d_mean - 0.25)
            d_max = d_mean + 0.25

        plt.ylim(d_min, d_max)

        # Mark the 0.5-0.7 band — presumably the desired discriminator-loss
        # equilibrium range for this GAN; verify with the training code.
        plt.axhspan(0.5, 0.7, alpha=0.15, color='green', label='理想区间')
        plt.axhline(y=0.5, color='green', linestyle='--', alpha=0.5)
        plt.axhline(y=0.7, color='green', linestyle='--', alpha=0.5)

    plt.xlabel('训练步数', fontproperties=font_prop, fontsize=14)
    plt.ylabel('判别器损失', fontproperties=font_prop, fontsize=14)
    plt.title('批次级别判别器损失', fontproperties=font_prop, fontsize=16)
    plt.grid(True, alpha=0.3)
    plt.legend(prop=font_prop, loc='upper right')

    plt.suptitle(f'{dataset_name}模型 - 批次损失曲线', fontproperties=font_prop, fontsize=18)

    plt.tight_layout()
    plt.subplots_adjust(top=0.92)  # leave room for the suptitle
    save_path = os.path.join(log_dir, "batch_loss_curves_zh.png")
    plt.savefig(save_path, dpi=150)
    plt.close()

    print(f"批次损失曲线已保存至 {save_path}")

def plot_dataset_comparison(log_dirs, dataset_names):
    """Plot validation-loss curves of several datasets on one figure.

    Each dataset gets a solid line for the generator validation loss and a
    dashed line for the discriminator validation loss. The figure is saved
    to ``logs/dataset_comparison_zh.png``.

    Args:
        log_dirs: Directories containing each dataset's ``training_progress.json``.
        dataset_names: Display names, aligned with *log_dirs*.
    """
    plt.figure(figsize=(14, 10))

    # Line-style/color pairs: (solid for generator, dashed for discriminator).
    styles = [('b-', 'b--'), ('r-', 'r--'), ('g-', 'g--'), ('m-', 'm--')]

    for i, (log_dir, dataset_name) in enumerate(zip(log_dirs, dataset_names)):
        training_data = load_training_data(log_dir)
        if not training_data or "epoch_losses" not in training_data or not training_data["epoch_losses"]:
            print(f"错误: 在 {log_dir} 中未找到有效的周期损失数据")
            continue

        # Extract per-epoch validation losses.
        epochs = [epoch["epoch"] for epoch in training_data["epoch_losses"]]
        g_val_losses = [epoch["g_val_loss"] for epoch in training_data["epoch_losses"]]
        d_val_losses = [epoch["d_val_loss"] for epoch in training_data["epoch_losses"]]

        # Cycle through styles when there are more datasets than styles.
        style = styles[i % len(styles)]
        plt.plot(epochs, g_val_losses, style[0], linewidth=2,
                 label=f'{dataset_name} - 生成器验证损失')
        plt.plot(epochs, d_val_losses, style[1], linewidth=2,
                 label=f'{dataset_name} - 判别器验证损失')

        # Annotate the final generator validation loss.
        plt.text(epochs[-1], g_val_losses[-1], f'{g_val_losses[-1]:.2f}',
                 ha='right', va='bottom', fontproperties=font_prop)

    plt.title('不同数据集模型性能对比', fontproperties=font_prop, fontsize=18)
    plt.xlabel('训练周期', fontproperties=font_prop, fontsize=14)
    plt.ylabel('验证损失', fontproperties=font_prop, fontsize=14)
    plt.grid(True, alpha=0.3)
    plt.legend(prop=font_prop)

    # Ensure the output directory exists before saving (savefig does not
    # create missing directories).
    save_path = 'logs/dataset_comparison_zh.png'
    os.makedirs(os.path.dirname(save_path), exist_ok=True)
    plt.tight_layout()
    plt.savefig(save_path, dpi=150)
    plt.close()

    print(f"数据集对比图已保存至 {save_path}")

def generate_training_metrics(log_dir):
    """Print a summary of training metrics for one run.

    Reports epoch count, total training time, initial/final/best generator
    validation losses and the relative improvement rate.

    Args:
        log_dir: Directory containing ``training_progress.json``; its basename
            is used as the dataset name in the printed summary.
    """
    training_data = load_training_data(log_dir)
    if not training_data:
        return

    dataset_name = os.path.basename(log_dir)
    start_time = training_data.get("start_time", 0)
    # If the run is still in progress (no end_time recorded), measure up to now.
    # Requires the module-level `import time`.
    end_time = training_data.get("end_time", time.time() if start_time else 0)
    total_time = end_time - start_time if start_time and end_time else 0

    epoch_losses = training_data.get("epoch_losses", [])
    if not epoch_losses:
        print(f"警告: {log_dir} 中没有轮次损失数据")
        return

    # Initial/final validation losses and relative improvement
    # (guarded against division by zero).
    initial_g_val = epoch_losses[0]["g_val_loss"]
    final_g_val = epoch_losses[-1]["g_val_loss"]
    improvement_rate = (initial_g_val - final_g_val) / initial_g_val * 100 if initial_g_val != 0 else 0

    # Best (lowest) generator validation loss and the epoch it occurred in.
    best_g_val_epoch = min(epoch_losses, key=lambda x: x["g_val_loss"])
    best_g_val_loss = best_g_val_epoch["g_val_loss"]
    best_epoch = best_g_val_epoch["epoch"]

    print(f"\n===== {dataset_name} 模型训练指标 =====")
    print(f"训练周期数: {len(epoch_losses)}")
    print(f"训练时间: {total_time/3600:.2f} 小时")
    print(f"初始生成器验证损失: {initial_g_val:.4f}")
    print(f"最终生成器验证损失: {final_g_val:.4f}")
    print(f"改进率: {improvement_rate:.2f}%")
    print(f"最佳生成器验证损失: {best_g_val_loss:.4f} (周期 {best_epoch})")
    print("=============================\n")

def main():
    """Generate all Chinese-labelled charts and metric summaries."""
    # (log directory, display name) for every dataset, in processing order.
    datasets = [
        ('logs/flower', '花卉'),
        ('logs/small_face', '人脸'),
        ('logs/mixed_dataset', '通用模型'),
    ]

    # Per-dataset charts and metric summary.
    for log_dir, display_name in datasets:
        plot_epoch_loss_curves(log_dir, display_name)
        plot_batch_loss_curves(log_dir, display_name)
        generate_training_metrics(log_dir)

    # Cross-dataset comparison chart.
    plot_dataset_comparison(
        [log_dir for log_dir, _ in datasets],
        [display_name for _, display_name in datasets],
    )

    print("所有中文图表生成完成！")

if __name__ == "__main__":
    # `time` (used by generate_training_metrics) is imported at module top,
    # so no local import is needed here.
    main()