import torch
import cv2
import numpy as np
from pathlib import Path
from typing import List, Tuple, Optional, Dict, Any
import json
from datetime import datetime
import time
import requests
import tempfile
import shutil

from config import config
from utils import LogManager, file_manager

class InferenceEngine:
    """YOLO-based inference engine.

    Responsibilities:
      * load / validate detection models, with automatic backup, rollback,
        and a JSON registry of the 10 most recently loaded models;
      * run inference on images dropped into the temp directory;
      * atomically move each processed image plus its YOLO-format label file
        and confidence metadata into the dataset directory tree.
    """

    # Glob patterns of every image type the pipeline ingests. Shared by
    # process_temp_images() and get_dataset_stats() so the two can't drift.
    _IMAGE_PATTERNS = ('*.jpg', '*.jpeg', '*.png', '*.bmp')

    def __init__(self):
        self.logger = LogManager.get_logger("InferenceEngine")
        self.model = None                          # currently loaded ultralytics YOLO model
        self.model_path: Optional[str] = None      # filesystem path the current model was loaded from
        self.model_metadata: Dict[str, Any] = {}   # id / version / device info for the current model
        self.device = self._get_device()
        self.confidence_threshold = config.inference.confidence_threshold

        # Directory layout.
        self.temp_dir = Path(config.dataset.temp_dir)
        self.dataset_dir = Path(config.dataset.datasets_dir)
        self.images_dir = self.dataset_dir / 'images'
        self.labels_dir = self.dataset_dir / 'labels'
        self.confidence_dir = self.dataset_dir / config.dataset.confidence_dir
        self.models_dir = Path("models")

        # Model version management.
        self.model_registry_file = self.models_dir / "model_registry.json"
        self.backup_models_dir = self.models_dir / "backup"

        # Make sure every directory we rely on exists before first use.
        for dir_path in (self.temp_dir, self.images_dir, self.labels_dir,
                         self.confidence_dir, self.models_dir, self.backup_models_dir):
            dir_path.mkdir(parents=True, exist_ok=True)

    def _get_device(self) -> str:
        """Pick the best available inference device: cuda > mps > cpu."""
        if torch.cuda.is_available():
            return "cuda"
        # hasattr guard: torch.backends.mps does not exist on older torch builds.
        if hasattr(torch.backends, 'mps') and torch.backends.mps.is_available():
            return "mps"
        return "cpu"

    def load_default_model(self) -> bool:
        """Load the model named in the config, if configured and present.

        Returns:
            True if a model was loaded, False otherwise.
        """
        if not config.inference.model_name:
            self.logger.debug("配置中未指定默认模型名称")
            return False

        default_model_path = self.models_dir / config.inference.model_name
        if default_model_path.exists():
            return self.load_model(str(default_model_path))

        self.logger.warning(f"默认模型不存在: {default_model_path}")
        return False

    def load_model(self, model_path: str, model_id: Optional[str] = None) -> bool:
        """Load a YOLO model, backing up the current one and recording metadata.

        Args:
            model_path: Absolute path, or a path resolved against the models dir.
            model_id: Optional identifier; defaults to the model file's stem.

        Returns:
            True on success. On failure returns False and, if loading the new
            weights raised, attempts to restore the most recent backup.
        """
        try:
            model_file = Path(model_path)

            # Relative paths are resolved against the models directory, unless
            # they already carry the 'models/' prefix (avoids 'models/models/x').
            if not model_file.is_absolute() and not model_path.startswith('models/'):
                model_file = self.models_dir / model_path

            if not model_file.exists():
                self.logger.error(f"模型文件不存在: {model_file}")
                return False

            # Keep a copy of the current weights so a bad load can be rolled back.
            if self.model is not None:
                self._backup_current_model()

            try:
                from ultralytics import YOLO
                new_model = YOLO(str(model_file))

                # Only commit engine state once the weights loaded cleanly.
                self.model = new_model
                self.model_path = str(model_file)

                self.model_metadata = {
                    "model_id": model_id or model_file.stem,
                    "model_path": str(model_file),
                    "loaded_at": datetime.now().isoformat(),
                    "device": self.device,
                    "model_size": model_file.stat().st_size,
                    "model_version": self._extract_model_version(model_file)
                }

                self._update_model_registry()

                self.logger.info(f"模型加载成功: {model_file}")
                return True

            except ImportError:
                self.logger.error("ultralytics库未安装，无法加载YOLO模型")
                return False
            except Exception as e:
                self.logger.error(f"加载YOLO模型失败: {str(e)}")
                # Try to fall back to the most recent backup, if one exists.
                self._restore_backup_model()
                return False

        except Exception as e:
            self.logger.error(f"加载模型失败: {str(e)}")
            return False

    def _backup_current_model(self):
        """Copy the currently loaded model file into the backup directory (best effort)."""
        if self.model_path and Path(self.model_path).exists():
            try:
                backup_name = f"backup_{datetime.now().strftime('%Y%m%d_%H%M%S')}_{Path(self.model_path).name}"
                backup_path = self.backup_models_dir / backup_name
                shutil.copy2(self.model_path, backup_path)
                self.logger.info(f"当前模型已备份: {backup_path}")
            except Exception as e:
                # Backup failure must never block loading a new model.
                self.logger.warning(f"备份模型失败: {str(e)}")

    def _restore_backup_model(self) -> bool:
        """Reload the newest backup model, if any. Returns True on success."""
        try:
            backup_files = list(self.backup_models_dir.glob("backup_*.pt"))
            if backup_files:
                # Newest backup by modification time.
                latest_backup = max(backup_files, key=lambda p: p.stat().st_mtime)
                self.logger.info(f"尝试恢复备份模型: {latest_backup}")
                return self.load_model(str(latest_backup))
        except Exception as e:
            self.logger.error(f"恢复备份模型失败: {str(e)}")
        return False

    def _extract_model_version(self, model_file: Path) -> str:
        """Derive a version string from a '..._vX' filename, else the file mtime."""
        try:
            if "_v" in model_file.stem:
                return model_file.stem.split("_v")[-1]
            # No explicit version tag: use the modification time as the version.
            return datetime.fromtimestamp(model_file.stat().st_mtime).strftime("%Y%m%d_%H%M%S")
        except Exception:
            # Was a bare `except:` — narrowed so KeyboardInterrupt/SystemExit propagate.
            return "unknown"

    def _update_model_registry(self):
        """Append the current model's metadata to the on-disk registry (keeps last 10)."""
        try:
            registry = []
            if self.model_registry_file.exists():
                with open(self.model_registry_file, 'r', encoding='utf-8') as f:
                    registry = json.load(f)

            registry.append(self.model_metadata)
            registry = registry[-10:]  # cap history at the 10 most recent loads

            with open(self.model_registry_file, 'w', encoding='utf-8') as f:
                json.dump(registry, f, indent=2)

        except Exception as e:
            # Registry is bookkeeping only; a failure must not break model loading.
            self.logger.warning(f"更新模型注册表失败: {str(e)}")

    def has_model(self) -> bool:
        """Return True if a model is currently loaded."""
        return self.model is not None

    def infer_single_image(self, image_path: Path) -> Optional[Tuple[float, List[Dict]]]:
        """Run detection on one image.

        Args:
            image_path: Path to an image readable by OpenCV.

        Returns:
            (max_confidence, detections) where each detection is
            {"class_id": int, "confidence": float, "bbox": [cx, cy, w, h]}
            with the bbox in YOLO-normalized coordinates, or None on failure.
            max_confidence is 0.0 when there are no detections.
        """
        if not self.has_model():
            self.logger.error("模型未加载")
            return None

        try:
            image = cv2.imread(str(image_path))
            if image is None:
                self.logger.error(f"无法读取图片: {image_path}")
                return None

            results = self.model(image)

            detections = []
            max_confidence = 0.0

            for result in results:
                if result.boxes is not None:
                    boxes = result.boxes
                    for i in range(len(boxes)):
                        # Pixel-space box (center x, center y, width, height).
                        box = boxes.xywh[i].cpu().numpy()
                        conf = float(boxes.conf[i].cpu().numpy())
                        cls_id = int(boxes.cls[i].cpu().numpy())

                        max_confidence = max(max_confidence, conf)

                        # Normalize to YOLO-format relative coordinates.
                        img_height, img_width = image.shape[:2]
                        x_center = box[0] / img_width
                        y_center = box[1] / img_height
                        width = box[2] / img_width
                        height = box[3] / img_height

                        detections.append({
                            "class_id": cls_id,
                            "confidence": conf,
                            "bbox": [x_center, y_center, width, height]
                        })

            return max_confidence, detections

        except Exception as e:
            self.logger.error(f"推理失败: {image_path}, 错误: {str(e)}")
            return None

    def process_temp_images(self) -> int:
        """Run inference on every image in the temp directory.

        Returns:
            The number of images successfully processed (and moved into the dataset).
        """
        if not self.has_model():
            self.logger.debug("模型未加载，跳过推理")
            return 0

        image_files = []
        for pattern in self._IMAGE_PATTERNS:
            image_files.extend(file_manager.list_files(self.temp_dir, pattern))

        if not image_files:
            self.logger.debug("temp目录中没有图片文件")
            return 0

        processed_count = sum(
            1 for image_path in image_files if self._process_single_temp_image(image_path)
        )

        if processed_count > 0:
            self.logger.info(f"完成推理处理: {processed_count} 张图片")

        return processed_count

    def _process_single_temp_image(self, image_path: Path) -> bool:
        """Infer one temp image and atomically publish image + label + confidence files.

        Label and confidence data are first written to `.tmp` files and only
        renamed into place after the image itself has moved, so a crash never
        leaves a dataset image without its sidecar files.
        """
        try:
            result = self.infer_single_image(image_path)
            if result is None:
                return False

            confidence, detections = result
            base_name = image_path.stem
            inference_time = datetime.now()

            # Final destinations inside the dataset tree.
            target_image_path = self.images_dir / image_path.name
            label_file = self.labels_dir / f"{base_name}.txt"
            confidence_file = self.confidence_dir / f"{base_name}.txt"

            # Staging files for the atomic rename.
            temp_label_file = label_file.with_suffix('.tmp')
            temp_confidence_file = confidence_file.with_suffix('.tmp')

            try:
                self._save_labels(temp_label_file, detections)
                self._save_confidence_with_metadata(temp_confidence_file, confidence, inference_time, base_name)

                if file_manager.move_file(image_path, target_image_path):
                    # Image is in place: commit the sidecar files atomically.
                    temp_label_file.rename(label_file)
                    temp_confidence_file.rename(confidence_file)

                    self.logger.debug(f"处理完成: {base_name}, 置信度: {confidence:.3f}")
                    return True

                # Image move failed: discard the staged sidecar files.
                temp_label_file.unlink(missing_ok=True)
                temp_confidence_file.unlink(missing_ok=True)
                return False

            except Exception:
                # Never leave stale .tmp files behind.
                temp_label_file.unlink(missing_ok=True)
                temp_confidence_file.unlink(missing_ok=True)
                raise

        except Exception as e:
            self.logger.error(f"处理图片失败: {image_path}, 错误: {str(e)}")
            return False

    def _save_labels(self, label_file: Path, detections: List[Dict]):
        """Write detections to *label_file* in YOLO text format (one line per box).

        Raises on I/O failure so the caller can clean up its staging files.
        """
        try:
            with open(label_file, 'w', encoding='utf-8') as f:
                for detection in detections:
                    bbox = detection['bbox']
                    f.write(f"{detection['class_id']} {bbox[0]:.6f} {bbox[1]:.6f} {bbox[2]:.6f} {bbox[3]:.6f}\n")
        except Exception as e:
            self.logger.error(f"保存标签失败: {label_file}, 错误: {str(e)}")
            raise

    def _save_confidence_with_metadata(self, confidence_file: Path, confidence: float, inference_time: datetime, sample_name: str):
        """Write confidence + model/threshold metadata as JSON.

        Raises on I/O failure so the caller can clean up its staging files.
        """
        try:
            confidence_data = {
                "confidence": confidence,
                "inference_time": inference_time.isoformat(),
                "sample_name": sample_name,
                "model_info": {
                    "model_id": self.model_metadata.get("model_id", "unknown"),
                    "model_version": self.model_metadata.get("model_version", "unknown"),
                    "device": self.device
                },
                "threshold": self.confidence_threshold,
                # Below-threshold samples are flagged for later (re-)annotation.
                "status": "uninferred" if confidence < self.confidence_threshold else "inferred"
            }

            with open(confidence_file, 'w', encoding='utf-8') as f:
                json.dump(confidence_data, f, indent=2)

        except Exception as e:
            self.logger.error(f"保存置信度失败: {confidence_file}, 错误: {str(e)}")
            raise

    def _count_images(self, directory: Path) -> int:
        """Count files in *directory* matching any ingestible image pattern."""
        return sum(len(file_manager.list_files(directory, pattern))
                   for pattern in self._IMAGE_PATTERNS)

    def get_dataset_stats(self) -> Dict[str, Any]:
        """Return file counts and model status, or {} on error.

        Counts every extension the pipeline ingests (previously only jpg/png
        were counted, undercounting jpeg/bmp relative to process_temp_images).
        """
        try:
            return {
                "temp_images": self._count_images(self.temp_dir),
                "dataset_images": self._count_images(self.images_dir),
                "labels": len(file_manager.list_files(self.labels_dir, "*.txt")),
                "confidences": len(file_manager.list_files(self.confidence_dir, "*.txt")),
                "model_loaded": self.has_model(),
                "model_path": self.model_path,
                "device": self.device
            }
        except Exception as e:
            self.logger.error(f"获取数据集统计失败: {str(e)}")
            return {}

    def update_model(self, model_id: str, model_url: str, model_path: Optional[str] = None) -> bool:
        """Download, validate, and switch to a new model, with rollback on failure.

        Args:
            model_id: Identifier recorded in the model metadata/registry.
            model_url: HTTP(S) URL of the weights file.
            model_path: Optional explicit target path; defaults to models/<id>.pt.

        Returns:
            True if the new model is downloaded, validated, and loaded.
        """
        try:
            self.logger.info(f"开始更新模型: {model_id}")

            target_path = Path(model_path) if model_path else self.models_dir / f"{model_id}.pt"

            # Download into a uniquely named temp file next to the target.
            temp_model_path = self.models_dir / f"temp_{model_id}_{int(time.time())}.pt"

            if not self._download_model(model_url, temp_model_path):
                return False

            try:
                # Validate the downloaded weights before touching the live model.
                from ultralytics import YOLO
                YOLO(str(temp_model_path))

                old_model_info = self.model_metadata.copy() if self.model_metadata else None

                # Move the weights into their final location BEFORE loading, so
                # self.model_path / metadata / registry record the real path.
                # (Previously the temp path was loaded first and then renamed,
                # leaving the engine pointing at a file that no longer existed,
                # which broke later backups and rollbacks.)
                if target_path.exists():
                    target_path.unlink()
                temp_model_path.rename(target_path)

                if self.load_model(str(target_path), model_id):
                    self.logger.info(f"模型更新成功: {model_id}")
                    return True

                # Load failed despite validation: restore the previous model.
                if old_model_info:
                    self.load_model(old_model_info["model_path"], old_model_info["model_id"])
                return False

            except Exception as e:
                self.logger.error(f"验证新模型失败: {str(e)}")
                temp_model_path.unlink(missing_ok=True)
                return False

        except Exception as e:
            self.logger.error(f"更新模型失败: {str(e)}")
            return False

    def _download_model(self, url: str, target_path: Path) -> bool:
        """Stream-download *url* to *target_path* via a temp file (atomic move).

        Returns:
            True on success; False on any network or filesystem error
            (the partial temp file is cleaned up).
        """
        tmp_path: Optional[Path] = None
        try:
            self.logger.info(f"开始下载模型: {url}")

            response = requests.get(url, stream=True, timeout=300)
            response.raise_for_status()

            target_path.parent.mkdir(parents=True, exist_ok=True)

            # Download into a temp file first so a failed/partial download
            # never appears at the target path.
            with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
                tmp_path = Path(tmp_file.name)
                for chunk in response.iter_content(chunk_size=8192):
                    tmp_file.write(chunk)

            shutil.move(str(tmp_path), str(target_path))

            self.logger.info(f"模型下载成功: {target_path}")
            return True

        except Exception as e:
            self.logger.error(f"下载模型失败: {str(e)}")
            # Don't leak the partially written temp file (previous version did).
            if tmp_path is not None:
                tmp_path.unlink(missing_ok=True)
            return False

# Module-level singleton shared by the rest of the application;
# importing this module creates (and initializes directories for) the engine.
inference_engine = InferenceEngine() 