"""
优化的推理脚本 - 集成所有推理性能优化
提高推理速度的完整解决方案
"""

import os
import time
import psutil
from datetime import datetime
from pathlib import Path
import sys
import torch
import numpy as np

# 添加项目路径
sys.path.append(r'D:\CodeRode\project\EmbedTrack-master')

# 导入优化配置
from optimization_config import get_optimized_inference_config, get_preset_config
from embedtrack.infer.infer_ctc_data import inference

def setup_optimized_inference_environment():
    """Configure global torch backend flags for fast inference and report devices.

    Side effects only (returns None): enables cuDNN autotuning and TF32
    matmuls when CUDA is available, clears the CUDA cache, and prints a
    summary of every visible GPU. Prints a CPU-fallback notice otherwise.

    Fix: the original fetched ``get_optimized_inference_config()`` into a
    local that was never used (the real config is obtained in ``main`` via
    ``get_preset_config``); the dead call has been removed.
    """
    print("=" * 80)
    print("设置优化的推理环境...")

    if torch.cuda.is_available():
        # benchmark=True lets cuDNN auto-tune kernels for fixed input sizes;
        # deterministic=False trades reproducibility for speed.
        torch.backends.cudnn.benchmark = True
        torch.backends.cudnn.deterministic = False
        torch.backends.cudnn.enabled = True
        torch.cuda.empty_cache()

        # Allow TF32 on Ampere+ GPUs: faster matmul/conv at slightly reduced precision.
        torch.backends.cuda.matmul.allow_tf32 = True
        torch.backends.cudnn.allow_tf32 = True

        print(f"CUDA可用，GPU数量: {torch.cuda.device_count()}")
        for i in range(torch.cuda.device_count()):
            props = torch.cuda.get_device_properties(i)
            print(f"  GPU {i}: {props.name} ({props.total_memory / 1024**3:.1f}GB)")
    else:
        print("CUDA不可用，将使用CPU推理")

    print("推理环境设置完成")
    print("=" * 80)

def monitor_inference_resources():
    """Print a snapshot of current CPU, RAM and (when available) GPU memory usage.

    CPU load is sampled over a 1-second interval via psutil; per-GPU lines
    show allocated / reserved / total memory in GiB.
    """
    gib = 1024**3
    cpu_load = psutil.cpu_percent(interval=1)
    ram = psutil.virtual_memory()

    print(f"推理资源监控:")
    print(f"  CPU使用率: {cpu_load:.1f}%")
    print(f"  内存使用: {ram.percent:.1f}% ({ram.used / gib:.1f}GB / {ram.total / gib:.1f}GB)")

    if not torch.cuda.is_available():
        return
    for gpu_idx in range(torch.cuda.device_count()):
        used = torch.cuda.memory_allocated(gpu_idx) / gib
        reserved = torch.cuda.memory_reserved(gpu_idx) / gib
        capacity = torch.cuda.get_device_properties(gpu_idx).total_memory / gib
        print(f"  GPU {gpu_idx} 内存: {used:.2f}GB / {reserved:.2f}GB / {capacity:.2f}GB")

def optimize_model_for_inference(model_path, config_file):
    """Placeholder hook for inference-time model optimization.

    Intended location for future work such as quantization or fusing batch
    normalization layers; currently it only logs start/end messages and
    leaves the model untouched (loading the model would be required first).

    Args:
        model_path: path to the trained model checkpoint (unused for now).
        config_file: path to the model's config JSON (unused for now).
    """
    print("优化模型用于推理...")
    # No actual optimization is performed yet.
    print("模型优化完成")

# Global configuration: all paths are resolved relative to this script's location.
FILE_PATH = Path(__file__)
PROJECT_PATH = str(FILE_PATH.parent)

RAW_DATA_PATHS = [os.path.join(PROJECT_PATH, "ctc_raw_data/challenge")]
MODEL_PATH = os.path.join(PROJECT_PATH, "models")
RES_PATH = os.path.join(PROJECT_PATH, "results")

# Datasets to process (sub-directories expected under each raw data path).
DATA_SETS = [
    "3t3",
    "hela",
    "pc3",
    "raw",
]

# Inference configuration
CALC_CTC_METRICS = False
MODEL_NAME = "optimized_adam_norm_onecycle_12"  # model produced by the optimized training run

def _latest_run_name(model_dir):
    """Return the directory name of the most recent timestamped training run, or None.

    Scans *model_dir* for entries named like ``2023-05-01---10-00-00``.
    Entries not matching the timestamp format (stray files such as
    ``.DS_Store``) are ignored; the original code crashed on them via
    ``strptime`` and raised IndexError on an empty directory.
    """
    fmt = "%Y-%m-%d---%H-%M-%S"
    stamps = []
    for entry in os.listdir(model_dir):
        try:
            stamps.append(datetime.strptime(entry, fmt))
        except ValueError:
            # Not a run directory created by training; skip it.
            continue
    return max(stamps).strftime(fmt) if stamps else None

def main():
    """Run optimized inference over every dataset/sequence and print timing stats.

    For each dataset in DATA_SETS the newest trained model is located under
    MODEL_PATH, then sequences "08" and "09" are inferred with the batch
    size taken from the 'fast_inference' preset. Failures on one sequence
    are reported and skipped so the remaining work still runs.
    """
    # Configure CUDA/cuDNN globals before any model work.
    setup_optimized_inference_environment()

    # The 'fast_inference' preset supplies speed-oriented settings (batch size etc.).
    opt_config = get_preset_config('fast_inference')
    print(f"使用推理优化配置: {opt_config}")

    BATCH_SIZE = opt_config['batch_size']

    total_start_time = time.time()
    total_inference_time = 0
    total_images = 0

    print(f"开始推理，批次大小: {BATCH_SIZE}")

    for raw_data_path in RAW_DATA_PATHS:
        for data_set in DATA_SETS:
            print(f"\n{'='*60}")
            print(f"开始推理数据集: {data_set}")
            print(f"{'='*60}")

            # Skip datasets that were never trained.
            model_dir = os.path.join(MODEL_PATH, data_set, MODEL_NAME)
            if not os.path.exists(model_dir):
                print(f"未找到数据集 {data_set} 的训练模型，跳过")
                continue

            # Pick the newest training run; None means no valid run directory exists.
            last_model = _latest_run_name(model_dir)
            if last_model is None:
                print(f"未找到数据集 {data_set} 的训练模型，跳过")
                continue
            model_path = os.path.join(model_dir, last_model, "best_iou_model.pth")
            config_file = os.path.join(model_dir, last_model, "config.json")

            if not os.path.exists(model_path):
                print(f"未找到模型文件: {model_path}")
                continue

            # Placeholder hook (quantization / BN-fusion could go here).
            optimize_model_for_inference(model_path, config_file)

            # Each dataset ships two challenge sequences: "08" and "09".
            for data_id in ["08", "09"]:
                img_path = os.path.join(raw_data_path, data_set, data_id)

                if not os.path.exists(img_path):
                    print(f"图像路径不存在: {img_path}")
                    continue

                print(f"推理: {data_set}/{data_id}")

                # Snapshot CPU/RAM/GPU usage before the run.
                monitor_inference_resources()

                t_start = time.time()
                try:
                    inference(img_path, model_path, config_file, batch_size=BATCH_SIZE)
                except Exception as e:
                    # Best-effort: report the failure and move on, matching the
                    # original behavior of skipping stats and cache cleanup.
                    print(f"推理失败: {img_path}, 错误: {str(e)}")
                    continue
                inference_time = time.time() - t_start
                total_inference_time += inference_time

                image_files = [f for f in os.listdir(img_path) if f.endswith('.tif')]
                total_images += len(image_files)

                print(f"推理完成: {img_path}")
                print(f"推理时间: {inference_time:.2f}秒")
                print(f"处理图像数: {len(image_files)}")
                # Guard against an empty sequence directory (original raised
                # ZeroDivisionError here when no .tif files were found).
                if image_files:
                    print(f"平均每张图像: {inference_time/len(image_files):.3f}秒")

                # Release cached GPU memory between sequences.
                if torch.cuda.is_available():
                    torch.cuda.empty_cache()

    total_time = time.time() - total_start_time

    print(f"\n{'='*80}")
    print(f"推理完成统计:")
    print(f"总耗时: {total_time:.2f}秒 ({total_time/60:.2f}分钟)")
    print(f"纯推理时间: {total_inference_time:.2f}秒")
    print(f"总处理图像数: {total_images}")
    # Both guards needed: a division by zero was possible when every
    # inference call failed but images were still counted (or vice versa).
    if total_images > 0 and total_inference_time > 0:
        print(f"平均每张图像推理时间: {total_inference_time/total_images:.3f}秒")
        print(f"推理速度: {total_images/total_inference_time:.2f} 图像/秒")
    print(f"{'='*80}")

if __name__ == '__main__':
    main()