#!/usr/bin/env python3
import sys
import os
import argparse
from utils.training_logger import TrainingLogger

def main():
    """Parse CLI options and dispatch training-log analysis."""
    parser = argparse.ArgumentParser(description='分析训练日志并生成报告')
    parser.add_argument('--log_file', type=str, help='训练日志文件路径')
    parser.add_argument('--output_dir', type=str, default='logs', help='输出目录')
    parser.add_argument('--analyze_flower', action='store_true', help='分析花卉模型训练日志')
    parser.add_argument('--analyze_small_flower', action='store_true', help='分析小型花卉模型训练日志')
    parser.add_argument('--analyze_all', action='store_true', help='分析所有训练日志')
    args = parser.parse_args()

    # An explicit --log_file short-circuits the flag-based discovery below.
    if args.log_file:
        analyze_log_file(args.log_file, args.output_dir)
        return

    # Each entry: (flag selecting it, display name, conventional log path).
    candidates = [
        (args.analyze_all or args.analyze_flower,
         'Flower Model', 'flower_training.log'),
        (args.analyze_all or args.analyze_small_flower,
         'Small Flower Model', 'small_flower_training.log'),
    ]
    targets = [
        (name, path)
        for selected, name, path in candidates
        if selected and os.path.exists(path)
    ]

    # Nothing selected or nothing on disk: tell the user how to proceed.
    if not targets:
        print("找不到训练日志文件。请确保日志文件存在，或使用--log_file参数指定日志文件路径。")
        return

    # Analyze every discovered log, each into its own output subdirectory.
    for model_name, log_file in targets:
        print(f"正在分析{model_name}训练日志: {log_file}")
        analyze_log_file(log_file, os.path.join(args.output_dir, model_name.lower().replace(' ', '_')))

def analyze_log_file(log_file, output_dir):
    """Analyze a training log file and generate a report.

    Parses epoch / batch / loss lines out of a GAN training log
    (generator + discriminator losses) and replays them through a
    TrainingLogger session so it can emit its markdown report and
    loss-curve plot.

    Args:
        log_file: path to the training log file to parse.
        output_dir: directory the TrainingLogger writes its session into.

    All parsing errors are caught and printed; the function never raises.
    """
    import re

    # Create the training log handler (opens a session under output_dir).
    logger = TrainingLogger(output_dir)

    try:
        # Read the whole log; explicit encoding avoids platform-dependent
        # defaults mangling non-ASCII log lines.
        with open(log_file, 'r', encoding='utf-8') as f:
            log_content = f.read()

        # Regular expressions matching the format the training loop emits.
        epoch_pattern = r"Epoch (\d+)/(\d+)"
        train_loss_pattern = r"Training - Generator loss: ([\d\.]+), Discriminator loss: ([\d\.]+)"
        val_loss_pattern = r"Validation - Generator loss: ([\d\.]+), Discriminator loss: ([\d\.]+)"
        batch_pattern = r"Batch \[(\d+)/(\d+)\] - D loss: ([\d\.]+), G loss: ([\d\.]+)"

        # Dataset sizes default to None so later code can detect absence.
        # (Previously these were only bound inside the `if`, raising
        # NameError when the dataset line was missing from the log.)
        train_size = None
        val_size = None
        dataset_match = re.search(r"Training set size: (\d+), Validation set size: (\d+)", log_content)
        if dataset_match:
            train_size = int(dataset_match.group(1))
            val_size = int(dataset_match.group(2))
            # 256 is the assumed image size; the log does not record it.
            logger.log_dataset_info(train_size, val_size, 256, os.path.basename(log_file).replace('_training.log', ''))

        # Extract training hyperparameters from the log.
        total_epochs_match = re.search(epoch_pattern, log_content)
        batch_size_match = re.search(r"Batch \[1/(\d+)\]", log_content)

        if total_epochs_match and batch_size_match:
            total_epochs = int(total_epochs_match.group(2))
            batch_count = int(batch_size_match.group(1))
            # Batch size is reconstructed from dataset size / batch count;
            # unknown when the dataset line was absent.
            batch_size = train_size // batch_count if train_size and batch_count else "Unknown"

            hyperparameters = {
                "total_epochs": total_epochs,
                "batch_size": batch_size,
                "batch_count": batch_count,
                "train_size": train_size if train_size is not None else "Unknown",
                "val_size": val_size if val_size is not None else "Unknown",
            }

            # log_hyperparameters expects an attribute-style object;
            # argparse.Namespace (already imported at module level) is the
            # stdlib way to build one from a dict.
            logger.log_hyperparameters(argparse.Namespace(**hyperparameters))

        # Split once and reuse; the original re-split the full content on
        # every matched training-loss line.
        log_lines = log_content.splitlines()

        # Pre-scan for the largest "total batches" figure, used as the
        # batch count passed to log_batch.
        max_batch_idx = 0
        for line in log_lines:
            batch_match = re.search(batch_pattern, line)
            if batch_match:
                max_batch_idx = max(max_batch_idx, int(batch_match.group(2)))

        # Walk the log, tracking the current epoch and replaying batch and
        # epoch losses into the logger.
        current_epoch = None
        for line_idx, line in enumerate(log_lines):
            epoch_match = re.search(epoch_pattern, line)
            if epoch_match:
                current_epoch = int(epoch_match.group(1))

            # Losses before the first epoch header cannot be attributed.
            if current_epoch is None:
                continue

            # Per-batch losses.
            batch_match = re.search(batch_pattern, line)
            if batch_match:
                batch_idx = int(batch_match.group(1))
                d_loss = float(batch_match.group(3))
                g_loss = float(batch_match.group(4))
                logger.log_batch(current_epoch, batch_idx, max_batch_idx, g_loss, d_loss)

            # Per-epoch training losses, paired with the validation losses
            # that follow within the next 20 lines.
            train_loss_match = re.search(train_loss_pattern, line)
            if train_loss_match:
                g_train_loss = float(train_loss_match.group(1))
                d_train_loss = float(train_loss_match.group(2))

                g_val_loss = None
                d_val_loss = None

                # Use the true enumerate index for the look-ahead window.
                # The original used log_lines.index(line), which returns the
                # FIRST occurrence of a duplicated line and so could pair a
                # later epoch's training loss with an earlier epoch's
                # validation loss (and was O(n) per match).
                for i in range(line_idx + 1, min(line_idx + 20, len(log_lines))):
                    val_loss_match = re.search(val_loss_pattern, log_lines[i])
                    if val_loss_match:
                        g_val_loss = float(val_loss_match.group(1))
                        d_val_loss = float(val_loss_match.group(2))
                        break

                # Only log the epoch once both train and val losses exist.
                if g_val_loss is not None and d_val_loss is not None:
                    logger.log_epoch(current_epoch, g_train_loss, d_train_loss, g_val_loss, d_val_loss)

        # Generate the final training report.
        logger.log_training_complete()
        print(f"分析完成！报告已生成: {logger.session_dir}/training_report.md")

        # Print absolute paths to the generated artifacts.
        print(f"报告路径: {os.path.abspath(os.path.join(logger.session_dir, 'training_report.md'))}")
        print(f"图表路径: {os.path.abspath(os.path.join(logger.session_dir, 'loss_curves.png'))}")

    except Exception as e:
        # Broad by design: this is a best-effort CLI analysis tool and must
        # not crash on a malformed log; the traceback is still surfaced.
        print(f"分析日志文件时出错: {e}")
        import traceback
        traceback.print_exc()

# Script entry point: run the CLI only when executed directly,
# not when imported as a module.
if __name__ == "__main__":
    main() 