"""
AI判断功能模块
实现信号质量分析、异常检测、模式识别等功能
"""
import numpy as np
import pandas as pd
from typing import List, Dict, Any, Optional, Tuple
from datetime import datetime
import logging
from sklearn.ensemble import IsolationForest
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import DBSCAN
import json
import uuid

from .models import AIAnalysisIn, AIAnalysisOut, AnalysisType, SeverityLevel

logger = logging.getLogger(__name__)


class AISignalAnalyzer:
    """AI signal analyzer.

    Runs three kinds of analysis over lists of raw signal records (dicts
    with keys such as ``signal_strength``, ``rsrp``, ``rsrq``, ``rssi``,
    ``snr``, ``latitude``, ``longitude``):

    * signal-quality assessment — threshold-based scoring of dBm statistics
    * anomaly detection — IsolationForest over standardized feature vectors
    * pattern recognition — trend slope, autocorrelation periodicity, DBSCAN

    All public methods return the plain-dict shape built by
    :meth:`_create_analysis_result` and never raise: internal failures are
    reported as a failure-labelled result with HIGH severity.
    """

    # Severity levels in ascending order, used when escalating a severity
    # "floor".  NOTE: plain ``max()`` on enum members is unsafe — non-int
    # Enum members are unorderable (TypeError) and str-based enums compare
    # alphabetically ("high" < "medium"), which would silently downgrade
    # HIGH to MEDIUM.
    _SEVERITY_ORDER = (SeverityLevel.LOW, SeverityLevel.MEDIUM, SeverityLevel.HIGH)

    def __init__(self):
        # Lazily-created sklearn estimators / scalers, keyed by purpose.
        self.models: Dict[str, Any] = {}
        self.scalers: Dict[str, Any] = {}
        # Tunable thresholds for each analysis type.
        self.thresholds = {
            'signal_quality': {
                'excellent': -70,  # dBm
                'good': -85,
                'fair': -100,
                'poor': -120
            },
            'anomaly_detection': {
                'contamination': 0.1,  # expected share of outliers
                'random_state': 42
            },
            'pattern_recognition': {
                'eps': 0.5,  # DBSCAN neighborhood radius
                'min_samples': 5
            }
        }

    def analyze_signal_quality(self, signal_data: List[Dict[str, Any]]) -> Dict[str, Any]:
        """Assess overall signal quality (level + stability) of *signal_data*.

        Empty input yields a "无数据" result; internal errors yield a
        "分析失败" result instead of raising.
        """
        try:
            if not signal_data:
                return self._create_analysis_result(
                    result_label="无数据",
                    confidence_score=0.0,
                    severity_level=SeverityLevel.LOW,
                    details={"error": "没有信号数据"}
                )

            # Per-record signal strength (dBm); missing or None readings
            # fall back to the weakest expected value.
            signal_strengths = [
                self._get_metric(record, 'signal_strength', -120)
                for record in signal_data
            ]

            # Descriptive statistics, then a threshold-based verdict.
            stats = self._calculate_signal_statistics(signal_strengths)
            quality_assessment = self._assess_signal_quality(stats)

            return self._create_analysis_result(
                result_label=quality_assessment['label'],
                confidence_score=quality_assessment['confidence'],
                severity_level=quality_assessment['severity'],
                details={
                    'statistics': stats,
                    'quality_metrics': quality_assessment['metrics'],
                    'recommendations': quality_assessment['recommendations']
                }
            )

        except Exception as e:
            logger.error("信号质量分析失败: %s", e)
            return self._create_analysis_result(
                result_label="分析失败",
                confidence_score=0.0,
                severity_level=SeverityLevel.HIGH,
                details={"error": str(e)}
            )

    def detect_anomalies(self, signal_data: List[Dict[str, Any]]) -> Dict[str, Any]:
        """Detect anomalous records with an IsolationForest.

        Requires at least 10 records; fewer yields a "数据不足" result.
        Internal errors yield a "检测失败" result instead of raising.
        """
        try:
            if len(signal_data) < 10:
                return self._create_analysis_result(
                    result_label="数据不足",
                    confidence_score=0.0,
                    severity_level=SeverityLevel.LOW,
                    details={"error": "数据点少于10个，无法进行异常检测"}
                )

            # Feature matrix -> outlier labels -> summarized verdict.
            features = self._extract_features(signal_data)
            anomaly_scores = self._perform_anomaly_detection(features)
            anomaly_analysis = self._analyze_anomalies(anomaly_scores, signal_data)

            return self._create_analysis_result(
                result_label=anomaly_analysis['label'],
                confidence_score=anomaly_analysis['confidence'],
                severity_level=anomaly_analysis['severity'],
                details=anomaly_analysis['details']
            )

        except Exception as e:
            logger.error("异常检测失败: %s", e)
            return self._create_analysis_result(
                result_label="检测失败",
                confidence_score=0.0,
                severity_level=SeverityLevel.HIGH,
                details={"error": str(e)}
            )

    def recognize_patterns(self, signal_data: List[Dict[str, Any]]) -> Dict[str, Any]:
        """Recognize temporal patterns (trend / periodicity / clusters).

        Requires at least 20 records; fewer yields a "数据不足" result.
        Internal errors yield a "识别失败" result instead of raising.
        """
        try:
            if len(signal_data) < 20:
                return self._create_analysis_result(
                    result_label="数据不足",
                    confidence_score=0.0,
                    severity_level=SeverityLevel.LOW,
                    details={"error": "数据点少于20个，无法进行模式识别"}
                )

            # Time series -> structural patterns -> summarized verdict.
            time_series = self._extract_time_series(signal_data)
            patterns = self._identify_patterns(time_series)
            pattern_analysis = self._analyze_patterns(patterns, signal_data)

            return self._create_analysis_result(
                result_label=pattern_analysis['label'],
                confidence_score=pattern_analysis['confidence'],
                severity_level=pattern_analysis['severity'],
                details=pattern_analysis['details']
            )

        except Exception as e:
            logger.error("模式识别失败: %s", e)
            return self._create_analysis_result(
                result_label="识别失败",
                confidence_score=0.0,
                severity_level=SeverityLevel.HIGH,
                details={"error": str(e)}
            )

    @staticmethod
    def _get_metric(record: Dict[str, Any], key: str, default: float) -> float:
        """Return ``record[key]`` unless the key is missing or the value is None.

        Uses an explicit ``is None`` check (not truthiness) so a legitimate
        reading of 0 — e.g. an SNR of 0 dB or a coordinate of 0 — is kept
        instead of being replaced by the default.
        """
        value = record.get(key)
        return default if value is None else value

    @classmethod
    def _escalate_severity(cls, current: SeverityLevel, floor: SeverityLevel) -> SeverityLevel:
        """Return whichever of *current*/*floor* ranks higher in _SEVERITY_ORDER.

        Replaces ``max(current, floor)``, which is unorderable for plain
        Enum members and wrong for str-based enums.
        """
        return max(current, floor, key=cls._SEVERITY_ORDER.index)

    def _calculate_signal_statistics(self, signal_strengths: List[float]) -> Dict[str, float]:
        """Compute descriptive statistics (mean/median/std/min/max/quartiles) in dBm."""
        if not signal_strengths:
            return {}

        signal_array = np.array(signal_strengths, dtype=float)
        # Compute both quartiles in a single percentile call.
        q25, q75 = np.percentile(signal_array, [25, 75])

        return {
            'mean': float(np.mean(signal_array)),
            'median': float(np.median(signal_array)),
            'std': float(np.std(signal_array)),
            'min': float(np.min(signal_array)),
            'max': float(np.max(signal_array)),
            'range': float(np.max(signal_array) - np.min(signal_array)),
            'q25': float(q25),
            'q75': float(q75),
            'iqr': float(q75 - q25)
        }

    def _assess_signal_quality(self, stats: Dict[str, float]) -> Dict[str, Any]:
        """Map signal statistics to a quality/stability verdict.

        Returns a dict with ``label``, ``confidence``, ``severity``,
        ``metrics`` and ``recommendations``.
        """
        mean_signal = stats.get('mean', -120)
        std_signal = stats.get('std', 0)

        # Quality level from mean dBm against configured thresholds.
        levels = self.thresholds['signal_quality']
        if mean_signal >= levels['excellent']:
            quality_label = "优秀"
            severity = SeverityLevel.LOW
            confidence = 0.95
        elif mean_signal >= levels['good']:
            quality_label = "良好"
            severity = SeverityLevel.LOW
            confidence = 0.85
        elif mean_signal >= levels['fair']:
            quality_label = "一般"
            severity = SeverityLevel.MEDIUM
            confidence = 0.75
        else:
            quality_label = "较差"
            severity = SeverityLevel.HIGH
            confidence = 0.90

        # Stability: the smaller the standard deviation, the more stable.
        stability_score = max(0, 1 - (std_signal / 30))
        if std_signal > 20:
            stability_label = "不稳定"
            # BUGFIX: was ``max(severity, SeverityLevel.MEDIUM)`` — see
            # _escalate_severity for why plain max() is wrong on enums.
            severity = self._escalate_severity(severity, SeverityLevel.MEDIUM)
        elif std_signal > 10:
            stability_label = "较稳定"
        else:
            stability_label = "稳定"

        # Actionable recommendations for weak/noisy signals.
        recommendations = []
        if mean_signal < -100:
            recommendations.append("建议检查设备位置或天线连接")
        if std_signal > 15:
            recommendations.append("信号波动较大，建议检查环境干扰")
        if mean_signal < -85 and std_signal < 5:
            recommendations.append("信号强度偏低但稳定，可能需要调整设备位置")

        return {
            'label': f"{quality_label} ({stability_label})",
            'confidence': confidence,
            'severity': severity,
            'metrics': {
                'quality_level': quality_label,
                'stability_level': stability_label,
                'stability_score': stability_score
            },
            'recommendations': recommendations
        }

    def _extract_features(self, signal_data: List[Dict[str, Any]]) -> np.ndarray:
        """Build the (n_records, 7) float feature matrix for anomaly detection.

        BUGFIX: uses :meth:`_get_metric` (None-check) instead of truthiness,
        so valid 0-valued readings are no longer replaced by defaults and a
        present-but-None ``signal_strength`` cannot leak into the matrix.
        """
        features = [
            [
                self._get_metric(record, 'signal_strength', -120),
                self._get_metric(record, 'rsrp', -120),
                self._get_metric(record, 'rsrq', -20),
                self._get_metric(record, 'rssi', -120),
                self._get_metric(record, 'snr', 0),
                self._get_metric(record, 'latitude', 0),
                self._get_metric(record, 'longitude', 0),
            ]
            for record in signal_data
        ]
        return np.array(features, dtype=float)

    def _perform_anomaly_detection(self, features: np.ndarray) -> np.ndarray:
        """Standardize *features* and label each row via IsolationForest.

        Returns the sklearn convention: -1 for outliers, 1 for inliers.
        The scaler/model are cached on the instance but re-fit per call.
        """
        if 'anomaly_scaler' not in self.scalers:
            self.scalers['anomaly_scaler'] = StandardScaler()

        features_scaled = self.scalers['anomaly_scaler'].fit_transform(features)

        if 'anomaly_model' not in self.models:
            self.models['anomaly_model'] = IsolationForest(
                contamination=self.thresholds['anomaly_detection']['contamination'],
                random_state=self.thresholds['anomaly_detection']['random_state']
            )

        return self.models['anomaly_model'].fit_predict(features_scaled)

    def _analyze_anomalies(self, anomaly_scores: np.ndarray, signal_data: List[Dict[str, Any]]) -> Dict[str, Any]:
        """Summarize IsolationForest labels into a severity verdict.

        Assumes ``len(anomaly_scores) > 0`` (callers enforce >= 10 records).
        """
        anomaly_count = np.sum(anomaly_scores == -1)
        total_count = len(anomaly_scores)
        anomaly_ratio = anomaly_count / total_count

        if anomaly_ratio == 0:
            label = "无异常"
            severity = SeverityLevel.LOW
            confidence = 0.95
        elif anomaly_ratio < 0.05:
            label = "轻微异常"
            severity = SeverityLevel.LOW
            confidence = 0.85
        elif anomaly_ratio < 0.15:
            label = "中等异常"
            severity = SeverityLevel.MEDIUM
            confidence = 0.80
        else:
            label = "严重异常"
            severity = SeverityLevel.HIGH
            confidence = 0.90

        # Collect the records flagged as outliers.
        anomaly_indices = np.where(anomaly_scores == -1)[0]
        anomaly_points = [signal_data[i] for i in anomaly_indices]

        return {
            'label': label,
            'confidence': confidence,
            'severity': severity,
            'details': {
                'anomaly_count': int(anomaly_count),
                'total_count': int(total_count),
                'anomaly_ratio': float(anomaly_ratio),
                'anomaly_points': anomaly_points[:10]  # cap payload at 10 points
            }
        }

    def _extract_time_series(self, signal_data: List[Dict[str, Any]]) -> np.ndarray:
        """Extract signal strength per record as a 1-D float array."""
        return np.array(
            [self._get_metric(record, 'signal_strength', -120) for record in signal_data],
            dtype=float
        )

    def _identify_patterns(self, time_series: np.ndarray) -> Dict[str, Any]:
        """Detect structural patterns: trend, periodicity, clustering."""
        patterns: Dict[str, Any] = {}

        # Trend: sign of the least-squares slope, with a dead zone of 0.1.
        x = np.arange(len(time_series))
        trend_slope = np.polyfit(x, time_series, 1)[0]

        if abs(trend_slope) < 0.1:
            patterns['trend'] = 'stable'
        elif trend_slope > 0.1:
            patterns['trend'] = 'increasing'
        else:
            patterns['trend'] = 'decreasing'

        # Periodicity (simplified): peak-picking on the raw autocorrelation.
        # NOTE(review): the series is not mean-centered before correlating,
        # so the all-negative dBm values give a DC-dominated autocorrelation;
        # detected "periods" may reflect that bias — confirm before relying
        # on dominant_period downstream.
        if len(time_series) > 50:
            autocorr = np.correlate(time_series, time_series, mode='full')
            autocorr = autocorr[autocorr.size // 2:]

            # Local maxima that stand out by more than one std above mean.
            peaks = []
            for i in range(1, len(autocorr) - 1):
                if autocorr[i] > autocorr[i - 1] and autocorr[i] > autocorr[i + 1]:
                    if autocorr[i] > np.mean(autocorr) + np.std(autocorr):
                        peaks.append(i)

            if len(peaks) > 0:
                patterns['periodicity'] = {
                    'has_period': True,
                    'dominant_period': int(np.median(peaks)),
                    'period_strength': float(np.max(autocorr[peaks]) / np.max(autocorr))
                }
            else:
                patterns['periodicity'] = {'has_period': False}
        else:
            patterns['periodicity'] = {'has_period': False}

        # Clustering: DBSCAN over the raw 1-D values (eps is in dBm units).
        if len(time_series) > 20:
            clustering = DBSCAN(
                eps=self.thresholds['pattern_recognition']['eps'],
                min_samples=self.thresholds['pattern_recognition']['min_samples']
            ).fit(time_series.reshape(-1, 1))

            # Label -1 marks DBSCAN noise, not a cluster.
            n_clusters = len(set(clustering.labels_)) - (1 if -1 in clustering.labels_ else 0)
            patterns['clustering'] = {
                'n_clusters': n_clusters,
                'has_clusters': n_clusters > 1
            }
        else:
            patterns['clustering'] = {'n_clusters': 1, 'has_clusters': False}

        return patterns

    def _analyze_patterns(self, patterns: Dict[str, Any], signal_data: List[Dict[str, Any]]) -> Dict[str, Any]:
        """Score the detected patterns and produce a combined verdict.

        The score is additive: trend contributes up to 0.3, periodicity 0.4,
        clustering 0.3; higher scores mean richer (less random) structure.
        """
        pattern_score = 0
        pattern_features = []

        # Trend contribution.
        trend = patterns.get('trend', 'stable')
        if trend == 'stable':
            pattern_score += 0.3
            pattern_features.append('稳定趋势')
        elif trend == 'increasing':
            pattern_score += 0.2
            pattern_features.append('上升趋势')
        else:
            pattern_score += 0.1
            pattern_features.append('下降趋势')

        # Periodicity contribution.
        periodicity = patterns.get('periodicity', {})
        if periodicity.get('has_period', False):
            pattern_score += 0.4
            period = periodicity.get('dominant_period', 0)
            pattern_features.append(f'周期性模式(周期:{period})')

        # Clustering contribution.
        clustering = patterns.get('clustering', {})
        if clustering.get('has_clusters', False):
            pattern_score += 0.3
            n_clusters = clustering.get('n_clusters', 1)
            pattern_features.append(f'聚类模式({n_clusters}个簇)')

        # Final label / confidence from the aggregate score.
        if pattern_score >= 0.7:
            label = "复杂模式"
            confidence = 0.85
            severity = SeverityLevel.LOW
        elif pattern_score >= 0.4:
            label = "简单模式"
            confidence = 0.75
            severity = SeverityLevel.LOW
        else:
            label = "随机模式"
            confidence = 0.65
            severity = SeverityLevel.MEDIUM

        return {
            'label': label,
            'confidence': confidence,
            'severity': severity,
            'details': {
                'pattern_score': pattern_score,
                'pattern_features': pattern_features,
                'trend_analysis': patterns.get('trend'),
                'periodicity_analysis': periodicity,
                'clustering_analysis': clustering
            }
        }

    def _create_analysis_result(self, result_label: str, confidence_score: float,
                              severity_level: SeverityLevel, details: Dict[str, Any]) -> Dict[str, Any]:
        """Assemble the canonical analysis-result dict returned by all analyses."""
        return {
            'analysis_id': str(uuid.uuid4()),
            'result_label': result_label,
            'confidence_score': confidence_score,
            'severity_level': severity_level,
            'details': details,
            'model_version': 'v1.0',
            'processing_time_ms': 0  # placeholder; overwritten by the caller with real timing
        }


class AIService:
    """Service facade over :class:`AISignalAnalyzer`.

    Maps each :class:`AIAnalysisIn` request to the matching analyzer
    method and wraps the raw result dict in an :class:`AIAnalysisOut`.
    """

    def __init__(self):
        self.analyzer = AISignalAnalyzer()

    def analyze_signal_data(self, analysis_request: AIAnalysisIn) -> AIAnalysisOut:
        """Run a single analysis request and return its populated output.

        Never propagates exceptions: unsupported analysis types and
        analyzer failures are logged and returned as a HIGH-severity
        failure result.
        """
        # NOTE(review): datetime.utcnow() is deprecated since Python 3.12;
        # kept as-is because downstream appears to expect naive timestamps.
        start_time = datetime.utcnow()

        # Route the request to the matching analyzer method.
        dispatch = {
            AnalysisType.SIGNAL_QUALITY: self.analyzer.analyze_signal_quality,
            AnalysisType.ANOMALY_DETECTION: self.analyzer.detect_anomalies,
            AnalysisType.PATTERN_RECOGNITION: self.analyzer.recognize_patterns,
        }

        try:
            handler = dispatch.get(analysis_request.analysis_type)
            if handler is None:
                raise ValueError(f"不支持的分析类型: {analysis_request.analysis_type}")
            outcome = handler(analysis_request.signal_data)

            # Replace the analyzer's placeholder with the measured wall time.
            elapsed_ms = int((datetime.utcnow() - start_time).total_seconds() * 1000)
            outcome['processing_time_ms'] = elapsed_ms

            return AIAnalysisOut(
                analysis_id=outcome['analysis_id'],
                record_id=analysis_request.record_id,
                device_id=analysis_request.device_id,
                analysis_type=analysis_request.analysis_type,
                result_label=outcome['result_label'],
                confidence_score=outcome['confidence_score'],
                severity_level=outcome['severity_level'],
                details=outcome['details'],
                model_version=outcome['model_version'],
                processing_time_ms=outcome['processing_time_ms'],
                created_at=start_time
            )

        except Exception as e:
            logger.error(f"AI分析失败: {str(e)}")
            # Degrade to an explicit failure result rather than raising.
            return AIAnalysisOut(
                analysis_id=str(uuid.uuid4()),
                record_id=analysis_request.record_id,
                device_id=analysis_request.device_id,
                analysis_type=analysis_request.analysis_type,
                result_label="分析失败",
                confidence_score=0.0,
                severity_level=SeverityLevel.HIGH,
                details={"error": str(e)},
                model_version="v1.0",
                processing_time_ms=int((datetime.utcnow() - start_time).total_seconds() * 1000),
                created_at=start_time
            )

    def batch_analyze(self, analysis_requests: List[AIAnalysisIn]) -> List[AIAnalysisOut]:
        """Analyze each request in order and collect the outputs."""
        return [self.analyze_signal_data(request) for request in analysis_requests]