"""
模型管理服务
功能：处理模型下载、训练、切换等操作，支持RAVDESS数据集
使用场景：Web UI的模型管理功能后端支持
"""

import filecmp
import json
import logging
import os
import pickle
import shutil
import tempfile
import zipfile

import numpy as np
import requests
from datetime import datetime
from pathlib import Path
from typing import Dict, Any, Optional, List
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, classification_report
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler, LabelEncoder
from sklearn.svm import SVC

from .dataset_downloader import DatasetDownloader
from features.audio_features import AudioFeatureExtractor

logger = logging.getLogger(__name__)

class TrainingStatus:
    """Tracks the state of the current model-training job.

    Holds the active stage, percentage progress, and a human-readable
    message; the ``is_training`` flag is derived from the stage on every
    update.
    """

    def __init__(self):
        """Start in the idle state with no active training job."""
        self.is_training = False
        self.progress = 0
        self.stage = "idle"
        self.message = "无训练任务"

    def update(self, stage: str, progress: int, message: str):
        """Record a new training snapshot.

        Args:
            stage: training stage identifier (e.g. "training", "completed")
            progress: completion percentage, 0-100
            message: human-readable status text
        """
        self.stage = stage
        self.progress = progress
        self.message = message
        # Any stage outside the idle/terminal set counts as an active run.
        self.is_training = stage not in ("idle", "completed", "error")

    def to_dict(self) -> Dict[str, Any]:
        """Serialize the current state (e.g. for a JSON API response)."""
        return dict(
            is_training=self.is_training,
            progress=self.progress,
            stage=self.stage,
            message=self.message,
        )

class ModelService:
    """Model management service.

    Handles model download, training, switching, and deletion for the
    Web UI, backed by the RAVDESS dataset.
    """

    def __init__(self, models_dir: str = "models", config_file: str = "config/models_config.json"):
        """
        Initialize the model service.

        Args:
            models_dir: directory where trained models are stored
            config_file: path to the JSON configuration file
        """
        self.models_dir = Path(models_dir)
        # parents=True so a nested path (e.g. "data/models") also works.
        self.models_dir.mkdir(parents=True, exist_ok=True)
        self.current_model_path = self.models_dir / "current_model.pkl"
        self.datasets_dir = Path("datasets")
        self.datasets_dir.mkdir(parents=True, exist_ok=True)

        # Training-state tracker exposed via get_training_status().
        self.training_status = TrainingStatus()

        # Dataset downloader; it also owns the RAVDESS configuration.
        self.dataset_downloader = DatasetDownloader(str(self.datasets_dir))

        # Audio feature extractor used during training.
        self.feature_extractor = AudioFeatureExtractor()

        # Load the JSON configuration file.
        self.config = self._load_config(config_file)

        # RAVDESS dataset configuration (provided by the downloader).
        self.ravdess_config = self.dataset_downloader.ravdess_config

    def _load_config(self, config_file: str) -> Dict[str, Any]:
        """
        Load a JSON configuration file.

        Args:
            config_file: configuration file path; tried as given first,
                then relative to the package root

        Returns:
            Parsed configuration dictionary.

        Raises:
            FileNotFoundError: if the file cannot be located.
            ValueError: if the file contains invalid JSON.
            RuntimeError: for any other read failure.
        """
        config_path = Path(config_file)
        if not config_path.exists():
            # Fall back to resolving relative to the package root.
            config_path = Path(__file__).parent.parent / config_file

        if not config_path.exists():
            raise FileNotFoundError(f"配置文件不存在: {config_file}")

        try:
            with open(config_path, 'r', encoding='utf-8') as f:
                return json.load(f)
        except json.JSONDecodeError as e:
            raise ValueError(f"配置文件格式错误: {e}")
        except Exception as e:
            raise RuntimeError(f"加载配置文件失败: {e}")

    async def get_available_models(self) -> Dict[str, Any]:
        """
        List available models.

        Returns:
            Dict with the local models, the currently active model (plus
            its metadata when resolvable), dataset availability, and the
            configured model types.
        """
        try:
            # Collect locally stored models, excluding the active copy.
            local_models = []
            for model_file in self.models_dir.glob("*.pkl"):
                if model_file.name != "current_model.pkl":
                    stat = model_file.stat()
                    local_models.append({
                        "id": model_file.stem,
                        "name": model_file.stem,
                        "type": "local",
                        "size": f"{stat.st_size / (1024*1024):.1f}MB",
                        "created": datetime.fromtimestamp(stat.st_ctime).strftime("%Y-%m-%d %H:%M"),
                        "is_current": model_file.samefile(self.current_model_path) if self.current_model_path.exists() else False
                    })

            # Resolve the currently active model, if any.
            current_model = None
            current_model_info = None
            if self.current_model_path.exists():
                current_model = "current"
                # Find the original model file the active copy came from.
                for model in local_models:
                    model_path = self.models_dir / f"{model['id']}.pkl"
                    try:
                        if model_path.exists() and model_path.samefile(self.current_model_path):
                            current_model_info = model
                            break
                    except (OSError, ValueError):
                        # samefile failed — fall back to comparing content.
                        try:
                            if filecmp.cmp(model_path, self.current_model_path, shallow=False):
                                current_model_info = model
                                break
                        except OSError:
                            # Content comparison failed too; try the next candidate.
                            continue

                # No matching file: read metadata out of the pickle itself.
                if not current_model_info:
                    try:
                        # NOTE(review): pickle.load is only safe here because these
                        # model files are produced by this service itself.
                        with open(self.current_model_path, 'rb') as f:
                            model_data = pickle.load(f)
                        current_model_info = {
                            "id": "current",
                            "name": f"{model_data.get('model_type', 'unknown')}_{model_data.get('dataset', 'unknown')}",
                            "type": model_data.get('model_type', 'unknown'),
                            "size": f"{self.current_model_path.stat().st_size / (1024*1024):.1f}MB",
                            "created": datetime.fromtimestamp(self.current_model_path.stat().st_ctime).strftime("%Y-%m-%d %H:%M"),
                            "is_current": True,
                            "accuracy": model_data.get('accuracy', 0),
                            "emotions": model_data.get('emotions', [])
                        }
                    except Exception as e:
                        # Unreadable model file: report a placeholder entry.
                        logger.warning(f"无法读取当前模型信息: {e}")
                        current_model_info = {
                            "id": "current",
                            "name": "当前模型",
                            "type": "unknown",
                            "size": f"{self.current_model_path.stat().st_size / (1024*1024):.1f}MB",
                            "created": datetime.fromtimestamp(self.current_model_path.stat().st_ctime).strftime("%Y-%m-%d %H:%M"),
                            "is_current": True
                        }

            # Check whether the RAVDESS dataset is available.
            has_ravdess = self._check_ravdess_dataset()

            return {
                "local_models": local_models,
                "current_model": current_model,
                "current_model_info": current_model_info,
                "has_model": self.current_model_path.exists(),
                "has_ravdess_dataset": has_ravdess,
                "dataset_info": {
                    "name": self.ravdess_config["name"],
                    "path": str(self.ravdess_config["local_path"]),
                    "all_emotions": self.ravdess_config["all_emotions"],
                    "emotions_count": int(self.ravdess_config["emotions_count"]),  # ensure a plain Python int
                    "description": self.ravdess_config["description"]
                },
                "model_types": self.config["model_types"]
            }
        except Exception as e:
            logger.error(f"获取模型列表失败: {e}")
            raise

    def _check_ravdess_dataset(self) -> bool:
        """Return True if the RAVDESS dataset is present locally."""
        return self.dataset_downloader._check_ravdess_exists()

    async def download_ravdess_dataset(self) -> Dict[str, Any]:
        """
        Download the RAVDESS dataset.

        Returns:
            Download result information from the dataset downloader.
        """
        try:
            return await self.dataset_downloader.download_ravdess_auto()
        except Exception as e:
            logger.error(f"下载RAVDESS数据集失败: {e}")
            raise

    def _organize_ravdess_files(self):
        """Reorganize downloaded RAVDESS files into per-emotion folders."""
        source_dir = self.datasets_dir
        target_dir = self.ravdess_config["local_path"]
        target_dir.mkdir(parents=True, exist_ok=True)

        # Group every wav file by the emotion encoded in its filename.
        for wav_file in source_dir.rglob("*.wav"):
            # RAVDESS filename format: 03-01-06-01-02-01-12.wav
            # The third field is the emotion code.
            try:
                emotion_code = wav_file.stem.split('-')[2]
                if emotion_code in self.ravdess_config["emotions_mapping"]:
                    emotion = self.ravdess_config["emotions_mapping"][emotion_code]

                    # Keep every emotion category.
                    emotion_dir = target_dir / emotion
                    emotion_dir.mkdir(parents=True, exist_ok=True)

                    # Copy the file into its emotion folder.
                    shutil.copy2(wav_file, emotion_dir / wav_file.name)

            except (IndexError, ValueError) as e:
                logger.warning(f"解析文件名失败: {wav_file.name}, {e}")

    def _train_ravdess_model(self, training_params: dict) -> dict:
        """
        Train a RAVDESS emotion-recognition model.

        Args:
            training_params: training parameters, including 'model_type'
                ('random_forest', 'svm', or 'knn')

        Returns:
            Training result dict (model id, accuracy, emotions, ...).

        Raises:
            FileNotFoundError: dataset path or wav files missing.
            ValueError: no valid files / no features / unknown model type.
        """
        try:
            # Report the data-loading stage.
            self.training_status.update("loading_data", 10, "正在加载RAVDESS数据集...")
            print("\n" + "="*60)
            print("🎯 开始RAVDESS情感识别模型训练")
            print("="*60)
            print("📂 正在加载RAVDESS数据集...")

            # RAVDESS emotion-code mapping (per the official documentation).
            emotion_mapping = {
                '01': 'neutral',
                '02': 'calm',
                '03': 'happy',
                '04': 'sad',
                '05': 'angry',
                '06': 'fearful',
                '07': 'disgust',
                '08': 'surprised'
            }

            # Resolve the dataset location.
            dataset_path = self.datasets_dir / "ravdess"
            print(f"📁 数据集路径: {dataset_path}")

            if not dataset_path.exists():
                print(f"❌ 错误: RAVDESS数据集路径不存在: {dataset_path}")
                raise FileNotFoundError(f"RAVDESS数据集路径不存在: {dataset_path}")

            # Find all wav files directly under the dataset directory.
            audio_files = list(dataset_path.glob("*.wav"))

            if not audio_files:
                print(f"❌ 错误: 在{dataset_path}中未找到wav文件")
                raise FileNotFoundError(f"在{dataset_path}中未找到wav文件")

            print(f"✅ 找到 {len(audio_files)} 个音频文件")
            logger.info(f"找到 {len(audio_files)} 个音频文件")

            # Parse emotion labels out of the filenames.
            features = []
            labels = []
            valid_files = []

            self.training_status.update("feature_extraction", 20, "正在从文件名解析情感标签...")
            print("\n🔍 解析文件名获取情感标签...")

            for audio_file in audio_files:
                try:
                    # RAVDESS filename format:
                    # Modality-Channel-Emotion-Intensity-Statement-Repetition-Actor.wav
                    filename_parts = audio_file.stem.split('-')

                    if len(filename_parts) != 7:
                        logger.warning(f"跳过格式不正确的文件: {audio_file.name}")
                        continue

                    emotion_code = filename_parts[2]  # third field is the emotion code

                    if emotion_code not in emotion_mapping:
                        logger.warning(f"跳过未知情感编码的文件: {audio_file.name} (emotion_code: {emotion_code})")
                        continue

                    emotion_label = emotion_mapping[emotion_code]
                    valid_files.append((audio_file, emotion_label))

                except Exception as e:
                    logger.warning(f"解析文件名失败 {audio_file.name}: {e}")
                    continue

            if not valid_files:
                print("❌ 错误: 没有找到有效的RAVDESS音频文件")
                raise ValueError("没有找到有效的RAVDESS音频文件")

            print(f"✅ 成功解析 {len(valid_files)} 个有效文件")
            logger.info(f"成功解析 {len(valid_files)} 个有效文件")

            # Tally the emotion distribution for reporting.
            emotion_counts = {}
            for _, emotion in valid_files:
                emotion_counts[emotion] = emotion_counts.get(emotion, 0) + 1

            print("\n📊 情感分布统计:")
            for emotion, count in emotion_counts.items():
                print(f"   {emotion}: {count} 个样本")
            logger.info(f"情感分布: {emotion_counts}")

            # Extract audio features.
            self.training_status.update("feature_extraction", 30, f"正在提取 {len(valid_files)} 个文件的音频特征...")
            print(f"\n🎵 开始提取 {len(valid_files)} 个文件的音频特征...")
            print("进度: ", end="")

            for i, (audio_file, emotion_label) in enumerate(valid_files):
                try:
                    # Extract the feature vector for this file.
                    file_features = self.feature_extractor.extract_all_features(str(audio_file))

                    if file_features is not None and len(file_features) > 0:
                        features.append(file_features)
                        labels.append(emotion_label)
                    else:
                        logger.warning(f"特征提取失败: {audio_file.name}")

                    # Map extraction progress onto the 30-70% range.
                    progress = 30 + int((i + 1) / len(valid_files) * 40)
                    self.training_status.update("feature_extraction", progress,
                                              f"已提取 {i+1}/{len(valid_files)} 个文件特征...")

                    # Console progress indicator.
                    if (i + 1) % 20 == 0 or (i + 1) == len(valid_files):
                        print(f" {i+1}/{len(valid_files)}", end="")
                        if (i + 1) % 100 == 0:
                            print()
                            print("进度: ", end="")

                except Exception as e:
                    print(f"\n⚠️  处理文件 {audio_file.name} 时出错: {e}")
                    logger.error(f"处理文件 {audio_file.name} 时出错: {e}")
                    continue

            print()  # newline after the progress line

            if not features:
                print("❌ 错误: 没有成功提取到任何特征")
                raise ValueError("没有成功提取到任何特征")

            # Convert to numpy arrays.
            X = np.array(features)
            y = np.array(labels)

            print(f"\n✅ 特征提取完成!")
            print(f"📏 特征矩阵形状: {X.shape}")
            print(f"🏷️  标签数量: {len(y)}")
            print(f"🎭 唯一情感: {set(y)}")

            logger.info(f"特征矩阵形状: {X.shape}")
            logger.info(f"标签数量: {len(y)}")
            logger.info(f"唯一情感: {set(y)}")

            # Preprocess the data.
            self.training_status.update("training", 70, "正在进行数据预处理...")
            print(f"\n⚙️  开始数据预处理...")

            # Standardize features.
            scaler = StandardScaler()
            X_scaled = scaler.fit_transform(X)
            print("✅ 特征标准化完成")

            # Encode string labels as integers.
            label_encoder = LabelEncoder()
            y_encoded = label_encoder.fit_transform(y)
            print("✅ 标签编码完成")

            # Stratified train/test split.
            X_train, X_test, y_train, y_test = train_test_split(
                X_scaled, y_encoded, test_size=0.2, random_state=42, stratify=y_encoded
            )
            print(f"✅ 数据集划分完成 - 训练集: {len(X_train)} 样本, 测试集: {len(X_test)} 样本")

            # Train the model.
            self.training_status.update("training", 80, "正在训练模型...")
            print(f"\n🧠 开始训练模型...")

            model_type = training_params.get('model_type', 'random_forest')
            print(f"📊 模型类型: {model_type}")

            if model_type == 'random_forest':
                model = RandomForestClassifier(n_estimators=100, random_state=42)
                print("🌳 使用随机森林分类器 (100棵树)")
            elif model_type == 'svm':
                model = SVC(kernel='rbf', random_state=42)
                print("🔍 使用支持向量机 (RBF核)")
            elif model_type == 'knn':
                model = KNeighborsClassifier(n_neighbors=5)
                print("👥 使用K近邻分类器 (K=5)")
            else:
                raise ValueError(f"不支持的模型类型: {model_type}")

            print("⏳ 模型训练中...")
            model.fit(X_train, y_train)
            print("✅ 模型训练完成!")

            # Evaluate on the held-out test set.
            self.training_status.update("training", 90, "正在评估模型性能...")
            print("\n📈 评估模型性能...")

            y_pred = model.predict(X_test)
            accuracy = accuracy_score(y_test, y_pred)

            print(f"🎯 模型准确率: {accuracy:.4f} ({accuracy:.2%})")
            logger.info(f"模型准确率: {accuracy:.4f}")

            # Persist the model bundle.
            self.training_status.update("saving", 95, "正在保存模型...")
            print("\n💾 保存模型...")

            model_data = {
                'model': model,
                'scaler': scaler,
                'label_encoder': label_encoder,
                'feature_config': self.feature_extractor.get_config(),
                'emotions': list(label_encoder.classes_),
                'accuracy': accuracy,
                'model_type': model_type,
                'dataset': 'ravdess',
                'total_samples': len(features),
                'emotion_distribution': emotion_counts
            }

            # Build a timestamped model id and target path.
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            model_id = f"ravdess_{model_type}_{timestamp}"
            model_path = self.models_dir / f"{model_id}.pkl"

            print(f"📁 模型ID: {model_id}")
            print(f"📂 保存路径: {model_path}")

            # Write the pickled model bundle.
            with open(model_path, 'wb') as f:
                pickle.dump(model_data, f)
            print("✅ 模型文件保存完成")

            # Activate the freshly trained model.
            shutil.copy2(model_path, self.current_model_path)
            print("✅ 设置为当前模型")

            self.training_status.update("completed", 100, f"训练完成! 准确率: {accuracy:.2%}")

            print("\n" + "="*60)
            print("🎉 RAVDESS模型训练完成!")
            print("="*60)
            print(f"📊 最终准确率: {accuracy:.4f} ({accuracy:.2%})")
            print(f"📁 模型已保存: {model_path}")
            print(f"🎭 支持情感: {list(label_encoder.classes_)}")
            print(f"📈 训练样本: {len(features)} 个")
            print("="*60)

            logger.info(f"模型已保存: {model_path}")

            return {
                'status': 'success',
                'model_id': model_id,
                'accuracy': float(accuracy),  # plain Python float for JSON
                'total_samples': int(len(features)),  # plain Python int for JSON
                'emotions': [str(emotion) for emotion in label_encoder.classes_],  # plain Python str for JSON
                'emotion_distribution': {str(k): int(v) for k, v in emotion_counts.items()},  # plain Python types for JSON
                'message': f'RAVDESS模型训练完成，准确率: {accuracy:.2%}'
            }

        except Exception as e:
            error_msg = f"RAVDESS模型训练失败: {str(e)}"
            print(f"\n❌ 训练失败!")
            print("="*60)
            print(f"错误信息: {error_msg}")
            print("="*60)
            logger.error(error_msg, exc_info=True)
            self.training_status.update("error", 0, error_msg)
            raise

    def _simulate_ravdess_training(self, model_type: str) -> dict:
        """Simulate a RAVDESS training run on random data.

        Stand-in for real feature extraction and training; the reported
        accuracy is the configured expectation, not a measured value.
        """
        emotions = self.ravdess_config["all_emotions"]
        model_config = self.config["model_types"][model_type]

        if model_type == "svm":
            model = SVC(**model_config["parameters"])
        elif model_type == "random_forest":
            model = RandomForestClassifier(**model_config["parameters"])
        else:  # knn
            model = KNeighborsClassifier(**model_config["parameters"])

        # Train on synthetic data covering the full 8 emotion classes.
        np.random.seed(self.config["training_settings"]["random_state"])
        total_samples = self.ravdess_config["total_samples"]
        train_samples = int(total_samples * (1 - self.config["training_settings"]["test_size"]))
        feature_count = self.config["feature_extraction"]["total_features"]

        X_train = np.random.rand(train_samples, feature_count)
        y_train = np.random.choice(emotions, train_samples)

        model.fit(X_train, y_train)

        return {
            "model": model,
            "emotions": emotions,
            "accuracy": float(model_config["expected_accuracy"]),
            "dataset": self.ravdess_config["name"],
            "model_type": model_type,
            "features": [f["name"] for f in self.config["feature_extraction"]["features"]],
            "emotions_count": int(len(emotions)),
            "training_date": datetime.now().isoformat()
        }

    async def get_training_status(self) -> Dict[str, Any]:
        """
        Get the current training status.

        Returns:
            Training-status information as a dict.
        """
        return self.training_status.to_dict()

    async def switch_model(self, model_path: str) -> Dict[str, Any]:
        """
        Switch the currently active model.

        Args:
            model_path: path of the model file to activate

        Returns:
            Switch result information.

        Raises:
            FileNotFoundError: if the model file does not exist.
        """
        try:
            source_path = Path(model_path)
            if not source_path.exists():
                raise FileNotFoundError(f"模型文件不存在: {model_path}")

            # Copy the new model over the active one (no backup is kept).
            shutil.copy2(source_path, self.current_model_path)

            logger.info(f"模型切换成功: {model_path}")

            return {
                "status": "success",
                "message": "模型切换成功",
                "current_model": str(self.current_model_path)
            }

        except Exception as e:
            logger.error(f"切换模型失败: {e}")
            raise

    async def start_training(self, training_params: dict) -> dict:
        """
        Start a model-training run.

        Args:
            training_params: training parameters (e.g. 'model_type')

        Returns:
            Training result dict; an {"status": "error", ...} dict when a
            run is already active, the dataset is missing, or training fails.
        """
        try:
            if self.training_status.is_training:
                return {
                    "status": "error", 
                    "message": "已有训练任务在进行中"
                }

            # Make sure the RAVDESS dataset has been downloaded.
            if not self._check_ravdess_dataset():
                return {
                    "status": "error",
                    "message": "RAVDESS数据集不存在，请先下载数据集"
                }

            # Run RAVDESS training synchronously.
            # NOTE(review): this blocks the event loop for the whole run;
            # consider loop.run_in_executor for long trainings.
            result = self._train_ravdess_model(training_params)
            return result

        except Exception as e:
            error_msg = f"启动训练失败: {str(e)}"
            logger.error(error_msg, exc_info=True)
            return {"status": "error", "message": error_msg}

    async def delete_model(self, model_id: str) -> Dict[str, Any]:
        """
        Delete a stored model.

        Args:
            model_id: model identifier (file stem under models_dir)

        Returns:
            Deletion result information.

        Raises:
            FileNotFoundError: if the model does not exist.
            ValueError: if the model is the currently active one.
        """
        try:
            model_path = self.models_dir / f"{model_id}.pkl"

            if not model_path.exists():
                raise FileNotFoundError(f"模型不存在: {model_id}")

            # Refuse to delete the model that is currently in use.
            if (self.current_model_path.exists() and 
                model_path.samefile(self.current_model_path)):
                raise ValueError("不能删除当前正在使用的模型")

            model_path.unlink()

            return {
                "status": "success",
                "message": f"模型 {model_id} 已删除"
            }

        except Exception as e:
            logger.error(f"删除模型失败: {e}")
            raise 