"""
垃圾检测模型管理器
负责训练、验证和管理垃圾检测专用模型
"""
import asyncio
import logging
import os
import json
import pickle
import numpy as np
import cv2
from datetime import datetime, timedelta
from typing import List, Dict, Optional, Tuple, Any, Union
from uuid import uuid4
from pathlib import Path
import shutil
from collections import defaultdict

try:
    from ultralytics import YOLO
    import torch
    import torchvision.transforms as transforms
    from sklearn.metrics import precision_recall_fscore_support, confusion_matrix
    import matplotlib.pyplot as plt
    import seaborn as sns
    ML_AVAILABLE = True
except ImportError:
    ML_AVAILABLE = False
    logging.warning("Machine learning libraries not available. Model training will be disabled.")

from schemas.hygiene_management import WasteType, WasteDetectionConfig
from core.config import get_settings

# Module-level logger and application settings (loaded via core.config).
logger = logging.getLogger(__name__)
settings = get_settings()


class WasteDataset:
    """Manages a YOLO-format dataset for waste detection.

    On-disk layout rooted at ``dataset_path``::

        images/<image_id>.jpg    source images
        labels/<image_id>.txt    YOLO annotations, one box per line
        metadata.json            bookkeeping (counts and class histogram)
        train|val|test/          created by :meth:`split_dataset`
    """

    def __init__(self, dataset_path: str):
        self.dataset_path = Path(dataset_path)
        self.images_path = self.dataset_path / "images"
        self.labels_path = self.dataset_path / "labels"
        self.metadata_path = self.dataset_path / "metadata.json"

        # Create the directory skeleton up front so add_sample can write freely.
        self.images_path.mkdir(parents=True, exist_ok=True)
        self.labels_path.mkdir(parents=True, exist_ok=True)

        # Dataset metadata (persisted to metadata.json).
        self.metadata = self._load_metadata()

        # Stable waste-type -> YOLO class-id mapping.  Order matters: it must
        # stay in sync with the `names` list written to the training YAML.
        self.class_mapping = {
            WasteType.PLASTIC: 0,
            WasteType.PAPER: 1,
            WasteType.ORGANIC: 2,
            WasteType.GLASS: 3,
            WasteType.METAL: 4,
            WasteType.HAZARDOUS: 5,
            WasteType.ELECTRONIC: 6,
            WasteType.WET: 7,
            WasteType.DRY: 8,
            WasteType.RECYCLABLE: 9
        }

        # Reverse lookup: class id -> waste type.
        self.id_to_class = {v: k for k, v in self.class_mapping.items()}

    def _load_metadata(self) -> Dict[str, Any]:
        """Load dataset metadata from disk, or return a fresh skeleton."""
        if self.metadata_path.exists():
            with open(self.metadata_path, 'r', encoding='utf-8') as f:
                return json.load(f)
        return {
            'created_at': datetime.now().isoformat(),
            'total_images': 0,
            'total_annotations': 0,
            'class_distribution': {},
            'version': '1.0'
        }

    def _save_metadata(self):
        """Persist dataset metadata to metadata.json."""
        with open(self.metadata_path, 'w', encoding='utf-8') as f:
            json.dump(self.metadata, f, ensure_ascii=False, indent=2)

    def add_sample(self, image: np.ndarray, annotations: List[Dict[str, Any]],
                   image_id: Optional[str] = None) -> str:
        """Add one image plus its annotations to the dataset.

        Args:
            image: image array as produced by OpenCV.
            annotations: dicts carrying 'waste_type' and 'bounding_box'
                ([x1, y1, x2, y2] in pixel coordinates).
            image_id: optional stable id; a UUID is generated when omitted.

        Returns:
            The image id under which the files were stored.

        Raises:
            IOError: if the image could not be written to disk.
        """
        if image_id is None:
            image_id = str(uuid4())

        # cv2.imwrite reports failure via its return value rather than
        # raising; fail loudly here so we never write a label file and bump
        # the counters for an image that was not actually saved.
        image_path = self.images_path / f"{image_id}.jpg"
        if not cv2.imwrite(str(image_path), image):
            raise IOError(f"Failed to write image {image_path}")

        # Convert annotations to normalized YOLO format and save them.
        yolo_annotations = self._convert_to_yolo_format(annotations, image.shape)
        label_path = self.labels_path / f"{image_id}.txt"
        with open(label_path, 'w') as f:
            for ann in yolo_annotations:
                f.write(f"{ann['class_id']} {ann['x_center']} {ann['y_center']} {ann['width']} {ann['height']}\n")

        # Update bookkeeping only after both files are safely on disk.
        self.metadata['total_images'] += 1
        self.metadata['total_annotations'] += len(annotations)
        distribution = self.metadata['class_distribution']
        for annotation in annotations:
            waste_type = annotation.get('waste_type', 'unknown')
            distribution[waste_type] = distribution.get(waste_type, 0) + 1

        self._save_metadata()

        logger.info(f"Added sample {image_id} with {len(annotations)} annotations")
        return image_id

    def _convert_to_yolo_format(self, annotations: List[Dict[str, Any]],
                                image_shape: Tuple[int, ...]) -> List[Dict[str, Any]]:
        """Convert pixel-space [x1, y1, x2, y2] boxes to normalized YOLO boxes.

        Raises:
            ValueError: if an annotation carries an unknown waste type.
        """
        height, width = image_shape[:2]
        yolo_annotations = []

        for annotation in annotations:
            # WasteType(...) raises ValueError for unrecognised values; every
            # valid member is present in class_mapping, so direct indexing is
            # safe (the previous `.get(..., 0)` default was dead code).
            waste_type = WasteType(annotation['waste_type'])
            class_id = self.class_mapping[waste_type]

            x1, y1, x2, y2 = annotation['bounding_box']

            # YOLO format: normalized center coordinates plus box size.
            yolo_annotations.append({
                'class_id': class_id,
                'x_center': (x1 + x2) / 2 / width,
                'y_center': (y1 + y2) / 2 / height,
                'width': (x2 - x1) / width,
                'height': (y2 - y1) / height
            })

        return yolo_annotations

    def get_statistics(self) -> Dict[str, Any]:
        """Return a summary of the dataset (counts, class histogram, paths)."""
        total_images = self.metadata['total_images']
        total_annotations = self.metadata['total_annotations']
        return {
            'total_images': total_images,
            'total_annotations': total_annotations,
            'class_distribution': self.metadata['class_distribution'],
            'average_annotations_per_image': (
                total_annotations / total_images if total_images > 0 else 0
            ),
            'dataset_path': str(self.dataset_path),
            'created_at': self.metadata['created_at']
        }

    def split_dataset(self, train_ratio: float = 0.8, val_ratio: float = 0.1,
                      test_ratio: float = 0.1) -> Dict[str, int]:
        """Randomly split the dataset into train/val/test directories.

        Images (and their label files, when present) are *copied* into
        ``<dataset_path>/{train,val,test}/{images,labels}``.

        Returns:
            Mapping of split name to the number of images in that split.

        Raises:
            ValueError: if the ratios are invalid or the dataset is empty.
        """
        # Ratios must be non-negative in addition to summing to 1: checking
        # the sum alone would accept e.g. train=1.5, val=-0.25, test=-0.25.
        if min(train_ratio, val_ratio, test_ratio) < 0:
            raise ValueError("训练、验证和测试比例不能为负数")
        if abs(train_ratio + val_ratio + test_ratio - 1.0) > 1e-6:
            raise ValueError("训练、验证和测试比例之和必须等于1")

        image_files = list(self.images_path.glob("*.jpg"))
        total_images = len(image_files)

        if total_images == 0:
            raise ValueError("数据集中没有图像文件")

        # Shuffle in place so the split assignment is random.
        np.random.shuffle(image_files)

        train_end = int(total_images * train_ratio)
        val_end = train_end + int(total_images * val_ratio)

        splits = {
            'train': image_files[:train_end],
            'val': image_files[train_end:val_end],
            'test': image_files[val_end:]
        }

        for split_name, files in splits.items():
            split_images_dir = self.dataset_path / split_name / "images"
            split_labels_dir = self.dataset_path / split_name / "labels"

            split_images_dir.mkdir(parents=True, exist_ok=True)
            split_labels_dir.mkdir(parents=True, exist_ok=True)

            for image_file in files:
                shutil.copy2(image_file, split_images_dir / image_file.name)

                # Copy the matching label file when one exists (an image may
                # legitimately have no annotations).
                label_file = self.labels_path / f"{image_file.stem}.txt"
                if label_file.exists():
                    shutil.copy2(label_file, split_labels_dir / label_file.name)

        counts = {name: len(files) for name, files in splits.items()}
        logger.info(f"Dataset split completed: train={counts['train']}, val={counts['val']}, test={counts['test']}")

        return counts


class WasteModelTrainer:
    """Trains and validates YOLO-based waste detection models.

    Falls back to mock runs when the ML stack (ultralytics/torch/sklearn)
    is not installed, so the surrounding service still works in degraded mode.
    """

    def __init__(self, model_dir: str = "models/waste_detection"):
        self.model_dir = Path(model_dir)
        self.model_dir.mkdir(parents=True, exist_ok=True)

        self.training_history: List[Dict[str, Any]] = []
        self.best_model_path: Optional[str] = None
        self.current_model = None

        # Default hyper-parameters; overridable per run via train_model(config=...).
        self.training_config = {
            'epochs': 100,
            'batch_size': 16,
            'learning_rate': 0.001,
            'image_size': 640,
            'patience': 10,
            'min_delta': 0.001
        }

    async def train_model(self, dataset: "WasteDataset",
                          config: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
        """Train a YOLO model on `dataset` and record the run.

        Args:
            dataset: dataset previously populated and split into train/val/test.
            config: optional overrides merged into the default training config.

        Returns:
            A training-record dict (id, timings, config, dataset stats,
            model path, key metrics).
        """
        if not ML_AVAILABLE:
            logger.warning("Machine learning libraries not available. Cannot train model.")
            return await self._mock_training(dataset)

        # Per-run overrides on top of the defaults.
        if config:
            self.training_config.update(config)

        try:
            yaml_config = self._create_yaml_config(dataset)

            # Start from the pretrained nano checkpoint.
            model = YOLO('yolov8n.pt')

            # NOTE(review): model.train() blocks the event loop for the whole
            # run; consider offloading to an executor if this coroutine must
            # stay responsive.
            logger.info("Starting model training...")
            start_time = datetime.now()

            results = model.train(
                data=yaml_config,
                epochs=self.training_config['epochs'],
                batch=self.training_config['batch_size'],
                lr0=self.training_config['learning_rate'],
                imgsz=self.training_config['image_size'],
                patience=self.training_config['patience'],
                project=str(self.model_dir),
                name='waste_detection_training',
                exist_ok=True,
                verbose=True
            )

            end_time = datetime.now()
            training_duration = (end_time - start_time).total_seconds()

            # Keep a handle on the best checkpoint produced by the run.
            best_model_path = self.model_dir / 'waste_detection_training' / 'weights' / 'best.pt'
            if best_model_path.exists():
                self.best_model_path = str(best_model_path)
                self.current_model = YOLO(self.best_model_path)

            training_record = {
                'training_id': str(uuid4()),
                'start_time': start_time.isoformat(),
                'end_time': end_time.isoformat(),
                'duration_seconds': training_duration,
                'config': self.training_config.copy(),
                'dataset_stats': dataset.get_statistics(),
                'model_path': self.best_model_path,
                'results': self._extract_training_results(results)
            }

            self.training_history.append(training_record)
            self._save_training_history()

            logger.info(f"Model training completed in {training_duration:.2f} seconds")
            return training_record

        except Exception as e:
            logger.error(f"Model training failed: {e}")
            raise

    def _create_yaml_config(self, dataset: "WasteDataset") -> str:
        """Write the Ultralytics dataset YAML and return its path.

        Class names must be the enum *values*: interpolating the enum members
        themselves would emit their reprs (e.g. "<WasteType.PLASTIC: ...>"),
        which Ultralytics cannot parse as class names.
        """
        class_names = [waste_type.value for waste_type in dataset.class_mapping]
        yaml_content = f"""
# Waste Detection Dataset Configuration
path: {dataset.dataset_path}
train: train/images
val: val/images
test: test/images

# Classes
nc: {len(dataset.class_mapping)}  # number of classes
names: {class_names}  # class names
"""

        yaml_path = self.model_dir / 'waste_dataset.yaml'
        with open(yaml_path, 'w', encoding='utf-8') as f:
            f.write(yaml_content)

        return str(yaml_path)

    def _extract_training_results(self, results) -> Dict[str, Any]:
        """Pull the key metrics out of an Ultralytics results object.

        Returns an empty dict when the results object does not expose the
        expected keys (best effort by design).
        """
        try:
            return {
                'final_map50': float(results.results_dict.get('metrics/mAP50(B)', 0)),
                'final_map50_95': float(results.results_dict.get('metrics/mAP50-95(B)', 0)),
                'final_precision': float(results.results_dict.get('metrics/precision(B)', 0)),
                'final_recall': float(results.results_dict.get('metrics/recall(B)', 0)),
                'final_loss': float(results.results_dict.get('train/box_loss', 0))
            }
        except Exception as e:
            logger.warning(f"Failed to extract training results: {e}")
            return {}

    async def _mock_training(self, dataset: "WasteDataset") -> Dict[str, Any]:
        """Simulate a training run (used when ML libraries are missing)."""
        logger.info("Running mock training...")

        # Capture the start time *before* sleeping so the recorded window
        # actually covers the simulated work (previously start was taken
        # after the sleep, shifting the whole window).
        start_time = datetime.now()
        await asyncio.sleep(2.0)
        end_time = datetime.now()

        training_record = {
            'training_id': str(uuid4()),
            'start_time': start_time.isoformat(),
            'end_time': end_time.isoformat(),
            'duration_seconds': (end_time - start_time).total_seconds(),
            'config': self.training_config.copy(),
            'dataset_stats': dataset.get_statistics(),
            'model_path': 'mock_model.pt',
            'results': {
                'final_map50': 0.85,
                'final_map50_95': 0.72,
                'final_precision': 0.88,
                'final_recall': 0.82,
                'final_loss': 0.15
            }
        }

        self.training_history.append(training_record)
        logger.info("Mock training completed")

        return training_record

    def _save_training_history(self):
        """Persist the training history next to the model weights."""
        history_path = self.model_dir / 'training_history.json'
        with open(history_path, 'w', encoding='utf-8') as f:
            json.dump(self.training_history, f, ensure_ascii=False, indent=2)

    def _load_training_history(self):
        """Restore training history persisted by previous runs, if any."""
        history_path = self.model_dir / 'training_history.json'
        if history_path.exists():
            with open(history_path, 'r', encoding='utf-8') as f:
                self.training_history = json.load(f)

    async def validate_model(self, dataset: "WasteDataset",
                             model_path: Optional[str] = None) -> Dict[str, Any]:
        """Validate a trained model on the dataset's test split.

        Args:
            dataset: dataset that has been split via split_dataset().
            model_path: explicit checkpoint; defaults to the best model from
                the latest training run.

        Raises:
            ValueError: if no model or no test split is available.
        """
        if not ML_AVAILABLE:
            return await self._mock_validation()

        model_path = model_path or self.best_model_path
        if not model_path or not Path(model_path).exists():
            raise ValueError("Model not found")

        try:
            model = YOLO(model_path)

            # The test split must have been created beforehand.
            test_images_dir = dataset.dataset_path / 'test' / 'images'
            if not test_images_dir.exists():
                raise ValueError("Test dataset not found")

            results = model.val(
                data=self._create_yaml_config(dataset),
                split='test',
                verbose=True
            )

            validation_results = {
                'validation_id': str(uuid4()),
                'model_path': model_path,
                'timestamp': datetime.now().isoformat(),
                'metrics': {
                    'map50': float(results.results_dict.get('metrics/mAP50(B)', 0)),
                    'map50_95': float(results.results_dict.get('metrics/mAP50-95(B)', 0)),
                    'precision': float(results.results_dict.get('metrics/precision(B)', 0)),
                    'recall': float(results.results_dict.get('metrics/recall(B)', 0))
                },
                'dataset_stats': dataset.get_statistics()
            }

            logger.info(f"Model validation completed: mAP50={validation_results['metrics']['map50']:.3f}")
            return validation_results

        except Exception as e:
            logger.error(f"Model validation failed: {e}")
            raise

    async def _mock_validation(self) -> Dict[str, Any]:
        """Simulate a validation run (used when ML libraries are missing)."""
        await asyncio.sleep(0.5)

        return {
            'validation_id': str(uuid4()),
            'model_path': 'mock_model.pt',
            'timestamp': datetime.now().isoformat(),
            'metrics': {
                'map50': 0.83,
                'map50_95': 0.70,
                'precision': 0.86,
                'recall': 0.80
            },
            'dataset_stats': {}
        }

    def get_training_history(self) -> List[Dict[str, Any]]:
        """Return a shallow copy of the recorded training runs."""
        return self.training_history.copy()

    def get_best_model_path(self) -> Optional[str]:
        """Return the path of the best checkpoint, or None before training."""
        return self.best_model_path

    def load_model(self, model_path: str):
        """Load a checkpoint from disk and make it the current model."""
        if ML_AVAILABLE and Path(model_path).exists():
            self.current_model = YOLO(model_path)
            self.best_model_path = model_path
            logger.info(f"Model loaded from {model_path}")
        else:
            logger.warning(f"Cannot load model from {model_path}")


class ModelAccuracyValidator:
    """Computes classification-accuracy metrics for a detection model."""

    def __init__(self):
        # Accumulated results from validate_accuracy runs.
        self.validation_results: List[Dict[str, Any]] = []

    async def validate_accuracy(self, model, test_dataset: "WasteDataset",
                                confidence_threshold: float = 0.5) -> Dict[str, Any]:
        """Run `model` over the test split and compute precision/recall/F1.

        NOTE(review): predicted and ground-truth class lists are aligned by
        simple truncation rather than by box matching (IoU), so the reported
        metrics are only a rough proxy for true detection accuracy.

        Raises:
            ValueError: if the test split is missing or produced no usable
                predictions/labels.
        """
        if not ML_AVAILABLE:
            return await self._mock_accuracy_validation()

        try:
            test_images_dir = test_dataset.dataset_path / 'test' / 'images'
            test_labels_dir = test_dataset.dataset_path / 'test' / 'labels'

            if not test_images_dir.exists() or not test_labels_dir.exists():
                raise ValueError("Test dataset not found")

            # Flattened class-id lists across the whole test split.
            all_predictions = []
            all_ground_truth = []

            image_files = list(test_images_dir.glob("*.jpg"))

            for image_file in image_files:
                image = cv2.imread(str(image_file))
                if image is None:
                    # Unreadable/corrupt image — skip rather than crash.
                    continue

                # Inference on a single image.
                results = model(image, conf=confidence_threshold, verbose=False)

                predictions = []
                for result in results:
                    boxes = result.boxes
                    if boxes is not None:
                        for box in boxes:
                            predictions.append({
                                'class_id': int(box.cls[0].cpu().numpy()),
                                'confidence': float(box.conf[0].cpu().numpy())
                            })

                # Ground-truth class ids from the YOLO label file (one
                # "class x y w h" line per box); an image may have no labels.
                label_file = test_labels_dir / f"{image_file.stem}.txt"
                ground_truth = []
                if label_file.exists():
                    with open(label_file, 'r') as f:
                        for line in f:
                            parts = line.strip().split()
                            if len(parts) >= 5:
                                ground_truth.append({'class_id': int(parts[0])})

                all_predictions.extend([p['class_id'] for p in predictions])
                all_ground_truth.extend([gt['class_id'] for gt in ground_truth])

            if all_predictions and all_ground_truth:
                # Align the two sequences by truncation (simplified; see the
                # note in the docstring).
                min_len = min(len(all_predictions), len(all_ground_truth))
                all_predictions = all_predictions[:min_len]
                all_ground_truth = all_ground_truth[:min_len]

                precision, recall, f1, _ = precision_recall_fscore_support(
                    all_ground_truth, all_predictions, average='weighted', zero_division=0
                )

                cm = confusion_matrix(all_ground_truth, all_predictions)

                def _counts(values):
                    # Cast numpy scalars to builtin ints so the result stays
                    # JSON-serializable when reports are dumped to disk
                    # (np.unique returns numpy integer types).
                    uniques, counts = np.unique(values, return_counts=True)
                    return {int(u): int(c) for u, c in zip(uniques, counts)}

                accuracy_results = {
                    'validation_id': str(uuid4()),
                    'timestamp': datetime.now().isoformat(),
                    'test_samples': len(image_files),
                    'confidence_threshold': confidence_threshold,
                    'metrics': {
                        'precision': float(precision),
                        'recall': float(recall),
                        'f1_score': float(f1),
                        'accuracy': float(np.trace(cm) / np.sum(cm)) if np.sum(cm) > 0 else 0.0
                    },
                    'confusion_matrix': cm.tolist(),
                    'class_distribution': {
                        'predictions': _counts(all_predictions),
                        'ground_truth': _counts(all_ground_truth)
                    }
                }

                self.validation_results.append(accuracy_results)

                logger.info(f"Accuracy validation completed: precision={precision:.3f}, recall={recall:.3f}, f1={f1:.3f}")
                return accuracy_results

            raise ValueError("No valid predictions or ground truth found")

        except Exception as e:
            logger.error(f"Accuracy validation failed: {e}")
            raise

    async def _mock_accuracy_validation(self) -> Dict[str, Any]:
        """Simulate an accuracy validation (used when ML libs are missing)."""
        await asyncio.sleep(1.0)

        return {
            'validation_id': str(uuid4()),
            'timestamp': datetime.now().isoformat(),
            'test_samples': 100,
            'confidence_threshold': 0.5,
            'metrics': {
                'precision': 0.87,
                'recall': 0.84,
                'f1_score': 0.85,
                'accuracy': 0.86
            },
            'confusion_matrix': [[45, 3, 2], [4, 38, 1], [2, 1, 4]],
            'class_distribution': {
                'predictions': {0: 47, 1: 42, 2: 11},
                'ground_truth': {0: 50, 1: 43, 2: 7}
            }
        }

    def generate_accuracy_report(self, validation_result: Dict[str, Any]) -> str:
        """Render a human-readable Markdown report for a validation result."""
        metrics = validation_result['metrics']

        report = f"""
# 垃圾检测模型精度验证报告

## 基本信息
- 验证ID: {validation_result['validation_id']}
- 验证时间: {validation_result['timestamp']}
- 测试样本数: {validation_result['test_samples']}
- 置信度阈值: {validation_result['confidence_threshold']}

## 精度指标
- 精确率 (Precision): {metrics['precision']:.3f}
- 召回率 (Recall): {metrics['recall']:.3f}
- F1分数: {metrics['f1_score']:.3f}
- 准确率 (Accuracy): {metrics['accuracy']:.3f}

## 分析结果
"""

        # Qualitative verdicts, tiered at 0.8/0.9 for precision and recall.
        if metrics['precision'] >= 0.9:
            report += "- ✅ 精确率优秀，误报率很低\n"
        elif metrics['precision'] >= 0.8:
            report += "- ✅ 精确率良好\n"
        else:
            report += "- ⚠️ 精确率需要改进，存在较多误报\n"

        if metrics['recall'] >= 0.9:
            report += "- ✅ 召回率优秀，漏检率很低\n"
        elif metrics['recall'] >= 0.8:
            report += "- ✅ 召回率良好\n"
        else:
            report += "- ⚠️ 召回率需要改进，存在较多漏检\n"

        # F1 uses lower tiers (0.75/0.85) than the per-metric verdicts.
        if metrics['f1_score'] >= 0.85:
            report += "- ✅ 综合性能优秀\n"
        elif metrics['f1_score'] >= 0.75:
            report += "- ✅ 综合性能良好\n"
        else:
            report += "- ⚠️ 综合性能需要改进\n"

        return report


class WasteModelManager:
    """Facade wiring together the dataset, trainer and accuracy validator."""

    def __init__(self, base_dir: str = "data/waste_detection"):
        self.base_dir = Path(base_dir)
        self.base_dir.mkdir(parents=True, exist_ok=True)

        self.dataset = WasteDataset(str(self.base_dir / "dataset"))
        self.trainer = WasteModelTrainer(str(self.base_dir / "models"))
        self.validator = ModelAccuracyValidator()

        # Restore any training history persisted by previous runs.
        self.trainer._load_training_history()

    async def create_training_dataset(self, samples: List[Dict[str, Any]]) -> Dict[str, Any]:
        """Add `samples` to the dataset and split it into train/val/test.

        Each sample is a dict with 'image', 'annotations' and an optional
        'image_id' key (same contract as WasteDataset.add_sample).
        """
        logger.info(f"Creating training dataset with {len(samples)} samples")

        for sample in samples:
            self.dataset.add_sample(
                sample['image'], sample['annotations'], sample.get('image_id')
            )

        # Split after all samples have been ingested.
        split_info = self.dataset.split_dataset()

        dataset_stats = self.dataset.get_statistics()
        dataset_stats['split_info'] = split_info

        logger.info(f"Training dataset created: {dataset_stats}")
        return dataset_stats

    async def train_waste_detection_model(self, training_config: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
        """Train the waste detection model on the managed dataset.

        Raises:
            ValueError: if no training data has been ingested yet.
        """
        logger.info("Starting waste detection model training")

        if self.dataset.metadata['total_images'] == 0:
            raise ValueError("No training data available. Please create a dataset first.")

        training_result = await self.trainer.train_model(self.dataset, training_config)

        logger.info(f"Model training completed: {training_result['training_id']}")
        return training_result

    async def validate_model_accuracy(self, model_path: Optional[str] = None,
                                      confidence_threshold: float = 0.5) -> Dict[str, Any]:
        """Validate the current (or explicitly given) model's accuracy.

        Raises:
            ValueError: if no model is loaded and none can be loaded.
        """
        logger.info("Starting model accuracy validation")

        if model_path:
            self.trainer.load_model(model_path)

        if not self.trainer.current_model:
            raise ValueError("No model available for validation")

        validation_result = await self.validator.validate_accuracy(
            self.trainer.current_model, self.dataset, confidence_threshold
        )

        logger.info(f"Model validation completed: {validation_result['validation_id']}")
        return validation_result

    async def generate_model_report(self) -> Dict[str, Any]:
        """Build a combined report of dataset, training and validation state.

        The 'summary' key is present whenever there is at least one training
        run or validation result.
        """
        report = {
            'report_id': str(uuid4()),
            'generated_at': datetime.now().isoformat(),
            'dataset_statistics': self.dataset.get_statistics(),
            'training_history': self.trainer.get_training_history(),
            'validation_results': self.validator.validation_results,
            'current_model': {
                'path': self.trainer.get_best_model_path(),
                'available': self.trainer.current_model is not None
            }
        }

        # BUG FIX: build the summary independently of the training history.
        # Previously the validation branch assumed report['summary'] already
        # existed and raised KeyError when there were validation results but
        # no recorded trainings.
        summary: Dict[str, Any] = {}

        if report['training_history']:
            latest_training = report['training_history'][-1]
            summary.update({
                'total_trainings': len(report['training_history']),
                'latest_training_date': latest_training['start_time'],
                'latest_training_duration': latest_training['duration_seconds'],
                'latest_results': latest_training['results']
            })

        if report['validation_results']:
            latest_validation = report['validation_results'][-1]
            summary['latest_validation'] = {
                'date': latest_validation['timestamp'],
                'accuracy': latest_validation['metrics']['accuracy'],
                'f1_score': latest_validation['metrics']['f1_score']
            }

        if summary:
            report['summary'] = summary

        return report

    def get_model_statistics(self) -> Dict[str, Any]:
        """Return a lightweight status snapshot of dataset/model state."""
        return {
            'dataset_stats': self.dataset.get_statistics(),
            'training_count': len(self.trainer.training_history),
            'validation_count': len(self.validator.validation_results),
            'current_model_available': self.trainer.current_model is not None,
            'best_model_path': self.trainer.get_best_model_path()
        }


# Global model manager instance.
# NOTE(review): instantiating at import time creates the data/model directory
# tree as a side effect; importing this module on a read-only filesystem will
# fail — consider lazy initialization.
waste_model_manager = WasteModelManager()