"""
异常检测和风险预警系统
实现供应链异常检测、风险预警和实时监控功能
"""

import pandas as pd
import numpy as np
from datetime import datetime, timedelta
import json
from typing import Dict, List, Tuple, Optional
from sklearn.ensemble import IsolationForest
from sklearn.preprocessing import StandardScaler
from scipy import stats
import warnings
warnings.filterwarnings('ignore')

class DemandAnomalyDetector:
    """Detect unusual demand behaviour for individual products.

    Combines three techniques: classical statistics (z-score and IQR
    fences), an Isolation Forest over engineered daily features, and
    moving-average based spike detection.
    """

    def __init__(self):
        # Isolation Forest assumes roughly 10% of samples are anomalous.
        self.isolation_forest = IsolationForest(
            contamination=0.1,
            random_state=42,
            n_estimators=100
        )
        self.scaler = StandardScaler()
        self.demand_patterns = {}     # reserved for learned per-product patterns
        self.anomaly_thresholds = {}  # reserved for per-product thresholds

    def extract_demand_features(self, df: pd.DataFrame, product_id: str) -> pd.DataFrame:
        """Build a per-day feature table for one product.

        Args:
            df: raw rows with at least 'product_id', 'date', 'quantity'
                and 'price' columns.
            product_id: product to extract features for.

        Returns:
            DataFrame of rolling/trend/seasonal/lag features with NaN
            warm-up rows dropped; an empty DataFrame when fewer than 7
            raw rows exist (rolling statistics would be meaningless).
        """
        product_data = df[df['product_id'] == product_id].copy()

        if len(product_data) < 7:
            return pd.DataFrame()

        # Aggregate to one row per calendar date.
        daily_data = product_data.groupby('date').agg({
            'quantity': ['sum', 'mean', 'std'],
            'price': 'mean'
        }).reset_index()

        daily_data.columns = ['date', 'total_quantity', 'avg_quantity', 'std_quantity', 'avg_price']
        daily_data = daily_data.sort_values('date')

        features = pd.DataFrame()
        features['date'] = daily_data['date']
        features['product_id'] = product_id

        # Rolling statistics over a one-week window.
        features['quantity_mean'] = daily_data['total_quantity'].rolling(window=7).mean()
        features['quantity_std'] = daily_data['total_quantity'].rolling(window=7).std()
        features['quantity_cv'] = features['quantity_std'] / features['quantity_mean']

        # Trend: day-over-day change plus a 3-day smoothing.
        features['trend'] = daily_data['total_quantity'].diff()
        features['trend_ma'] = features['trend'].rolling(window=3).mean()

        # Volatility: absolute daily percent change and its weekly mean.
        features['volatility'] = daily_data['total_quantity'].pct_change().abs()
        features['volatility_ma'] = features['volatility'].rolling(window=7).mean()

        # Price movement.
        features['price_change'] = daily_data['avg_price'].pct_change()

        # Calendar/seasonality features; parse the date column once
        # instead of re-converting it for every derived column.
        dates = pd.to_datetime(features['date'])
        features['day_of_week'] = dates.dt.dayofweek
        features['month'] = dates.dt.month
        features['quarter'] = dates.dt.quarter

        # Lag features: previous day, 3 days back, one week back.
        for lag in [1, 3, 7]:
            features[f'lag_{lag}'] = daily_data['total_quantity'].shift(lag)

        return features.dropna()

    def detect_statistical_anomalies(self, series: pd.Series) -> List[Dict]:
        """Flag outliers in a numeric series via z-score and IQR rules.

        Returns a list of dicts with 'index', 'value', 'type' and
        'severity'; z-score hits also carry 'z_score'. A point caught
        by both rules is reported once (as the z-score hit).
        """
        anomalies = []

        # Too little data for meaningful statistics.
        if len(series) < 3:
            return anomalies

        # Z-score detection. Skip constant series: stats.zscore would
        # divide by zero and yield NaNs that never pass the threshold
        # anyway (same result, without the runtime warning).
        if series.std(ddof=0) > 0:
            z_scores = np.abs(stats.zscore(series))
            z_threshold = 2.5

            for idx, z_score in enumerate(z_scores):
                if z_score > z_threshold:
                    anomalies.append({
                        'index': idx,
                        'value': series.iloc[idx],
                        'z_score': z_score,
                        'type': 'statistical_outlier',
                        'severity': 'high' if z_score > 3 else 'medium'
                    })

        # IQR (Tukey fence) detection.
        Q1 = series.quantile(0.25)
        Q3 = series.quantile(0.75)
        IQR = Q3 - Q1
        lower_bound = Q1 - 1.5 * IQR
        upper_bound = Q3 + 1.5 * IQR

        for idx, value in enumerate(series):
            if value < lower_bound or value > upper_bound:
                # Do not double-report points already caught by z-score.
                if not any(a['index'] == idx for a in anomalies):
                    anomalies.append({
                        'index': idx,
                        'value': value,
                        'type': 'iqr_outlier',
                        'severity': 'medium'
                    })

        return anomalies

    def detect_ml_anomalies(self, features: pd.DataFrame) -> List[Dict]:
        """Run Isolation Forest over the feature table.

        NOTE(review): fits the scaler and forest anew on every call, so
        scoring is relative to the current batch, not a persisted model.
        """
        # The forest needs a minimum sample size to be useful.
        if len(features) < 10:
            return []

        # Everything except identifier columns is a model feature.
        feature_cols = [col for col in features.columns
                       if col not in ['date', 'product_id']]

        X = features[feature_cols].fillna(0)

        X_scaled = self.scaler.fit_transform(X)

        # fit_predict labels anomalies -1; lower decision-function
        # scores indicate stronger anomalies.
        anomaly_labels = self.isolation_forest.fit_predict(X_scaled)
        anomaly_scores = self.isolation_forest.decision_function(X_scaled)

        anomalies = []
        for idx, (label, score) in enumerate(zip(anomaly_labels, anomaly_scores)):
            if label == -1:
                anomalies.append({
                    'index': idx,
                    'date': features.iloc[idx]['date'],
                    'product_id': features.iloc[idx]['product_id'],
                    'anomaly_score': score,
                    'type': 'ml_anomaly',
                    'severity': 'high' if score < -0.5 else 'medium'
                })

        return anomalies

    def detect_demand_spikes(self, df: pd.DataFrame, product_id: str) -> List[Dict]:
        """Detect days whose demand exceeds a rolling mean + 2*std band.

        Requires at least 14 raw rows (two rolling windows) of history;
        returns one dict per spike day.
        """
        product_data = df[df['product_id'] == product_id].copy()

        if len(product_data) < 14:
            return []

        daily_data = product_data.groupby('date')['quantity'].sum().sort_index()

        # Rolling baseline and upper control limit over one week.
        # (The lower band was computed before but never used; dropped.)
        window = 7
        ma = daily_data.rolling(window=window).mean()
        std = daily_data.rolling(window=window).std()

        upper_threshold = ma + 2 * std

        spikes = []
        for date, value in daily_data.items():
            # Skip warm-up rows where the rolling window is incomplete.
            if not np.isnan(upper_threshold[date]) and value > upper_threshold[date]:
                spike_ratio = value / ma[date] if ma[date] > 0 else 1
                spikes.append({
                    'date': date,
                    'value': value,
                    'expected': ma[date],
                    'spike_ratio': spike_ratio,
                    'type': 'demand_spike',
                    'severity': 'high' if spike_ratio > 3 else 'medium'
                })

        return spikes

class SupplyChainRiskAnalyzer:
    """Supply-chain risk analyzer.

    Produces 0-1 risk scores for suppliers, inventory positions and the
    overall market, and maps scores to discrete risk levels.
    """

    def __init__(self):
        # Relative weight of each risk dimension (informational; the
        # individual assessors below use their own internal weights).
        self.risk_factors = {
            'supplier_risk': 0.3,
            'demand_risk': 0.25,
            'inventory_risk': 0.2,
            'logistics_risk': 0.15,
            'market_risk': 0.1
        }
        # Score cut-offs consumed by _categorize_risk.
        self.risk_thresholds = {
            'critical': 0.8,
            'high': 0.6,
            'medium': 0.4,
            'low': 0.2
        }

    def assess_supplier_risk(self, supplier_df: pd.DataFrame) -> Dict:
        """Score each supplier row.

        Returns:
            {supplier_id: metrics dict incl. 'supplier_risk' and
            'risk_level'}. Missing metric columns fall back to
            optimistic defaults.
        """
        risk_metrics = {}

        for _, supplier in supplier_df.iterrows():
            supplier_id = supplier['supplier_id']

            # Input metrics; Series.get supplies a default for absent columns.
            on_time_delivery_rate = supplier.get('on_time_delivery_rate', 0.95)
            quality_score = supplier.get('quality_score', 0.9)
            financial_stability = supplier.get('financial_stability', 0.8)
            geographic_risk = supplier.get('geographic_risk', 0.3)

            # Weighted blend of performance shortfalls plus geographic exposure.
            supplier_risk = (
                0.3 * (1 - on_time_delivery_rate) +
                0.25 * (1 - quality_score) +
                0.25 * (1 - financial_stability) +
                0.2 * geographic_risk
            )

            risk_metrics[supplier_id] = {
                'supplier_risk': supplier_risk,
                'on_time_delivery_rate': on_time_delivery_rate,
                'quality_score': quality_score,
                'financial_stability': financial_stability,
                'geographic_risk': geographic_risk,
                'risk_level': self._categorize_risk(supplier_risk)
            }

        return risk_metrics

    def assess_inventory_risk(self, inventory_df: pd.DataFrame, 
                            demand_df: pd.DataFrame) -> Dict:
        """Score inventory risk per product from stock and demand data.

        Returns:
            {product_id: metrics dict}. Blends demand volatility,
            days-of-cover and an expiration heuristic.
        """
        risk_metrics = {}

        # Latest stock level (and mean price) per product.
        latest_inventory = inventory_df.groupby('product_id').agg({
            'current_stock': 'last',
            'price': 'mean'
        }).reset_index()

        for _, product in latest_inventory.iterrows():
            product_id = product['product_id']
            current_stock = product['current_stock']

            # Demand volatility as the coefficient of variation.
            product_demand = demand_df[demand_df['product_id'] == product_id]
            if len(product_demand) > 0:
                demand_std = product_demand['quantity'].std()
                demand_mean = product_demand['quantity'].mean()
                demand_cv = demand_std / demand_mean if demand_mean > 0 else 0
            else:
                demand_cv = 0

            # Days of cover: stock / average daily demand. Stored under
            # 'turnover_days' below (it is days, not a turnover ratio).
            avg_daily_demand = demand_df[demand_df['product_id'] == product_id]['quantity'].mean()
            if avg_daily_demand > 0:
                turnover_rate = current_stock / avg_daily_demand
            else:
                turnover_rate = float('inf')

            # Expiration heuristic against an assumed shelf life. Guard
            # the division: all-zero demand previously raised
            # ZeroDivisionError here even though the turnover branch
            # above already handles that case.
            shelf_life_days = 365  # assumed shelf life -- TODO confirm per product
            shelf_capacity = avg_daily_demand * shelf_life_days
            if shelf_capacity > 0:
                expiration_risk = max(0, 1 - (current_stock / shelf_capacity))
            else:
                expiration_risk = 0

            # Weighted blend with each component clamped to [0, 1].
            inventory_risk = (
                0.4 * min(demand_cv, 1) +
                0.3 * min(1 / turnover_rate if turnover_rate > 0 else 1, 1) +
                0.3 * expiration_risk
            )

            risk_metrics[product_id] = {
                'inventory_risk': inventory_risk,
                'demand_volatility': demand_cv,
                'turnover_days': turnover_rate,
                'expiration_risk': expiration_risk,
                'risk_level': self._categorize_risk(inventory_risk)
            }

        return risk_metrics

    def assess_market_risk(self, market_df: pd.DataFrame) -> Dict:
        """Score overall market risk from price volatility, demand trend
        and market concentration (Herfindahl index)."""
        risk_metrics = {}

        price_volatility = market_df['price'].pct_change().std()
        demand_trend = market_df['quantity'].diff().mean()
        market_concentration = self._calculate_market_concentration(market_df)

        # Normalise the trend by mean demand; guard against a zero mean,
        # which would otherwise propagate inf into the risk score.
        mean_quantity = market_df['quantity'].mean()
        trend_component = abs(demand_trend) / mean_quantity if mean_quantity > 0 else 0

        market_risk = (
            0.4 * price_volatility +
            0.3 * trend_component +
            0.3 * market_concentration
        )

        risk_metrics['overall_market'] = {
            'market_risk': market_risk,
            'price_volatility': price_volatility,
            'demand_trend': demand_trend,
            'market_concentration': market_concentration,
            'risk_level': self._categorize_risk(market_risk)
        }

        return risk_metrics

    def _categorize_risk(self, risk_score: float) -> str:
        """Map a 0-1 risk score onto a discrete level name."""
        if risk_score >= self.risk_thresholds['critical']:
            return 'critical'
        elif risk_score >= self.risk_thresholds['high']:
            return 'high'
        elif risk_score >= self.risk_thresholds['medium']:
            return 'medium'
        else:
            return 'low'

    def _calculate_market_concentration(self, market_df: pd.DataFrame) -> float:
        """Herfindahl index over product quantity shares (0 = dispersed,
        1 = single product dominates); 0 for an empty market."""
        market_shares = market_df.groupby('product_id')['quantity'].sum()
        total_market = market_shares.sum()

        if total_market > 0:
            shares = market_shares / total_market
            herfindahl = (shares ** 2).sum()
            return herfindahl
        return 0

class RealTimeAlertSystem:
    """Generate, deduplicate and summarize supply-chain alerts."""

    def __init__(self):
        self.active_alerts = []   # alerts still open
        self.alert_history = []   # every alert ever raised
        # Per-type trigger thresholds and dedup cooldown windows.
        self.alert_rules = {
            'demand_spike': {'threshold': 2.0, 'cooldown_hours': 4},
            'inventory_low': {'threshold': 0.2, 'cooldown_hours': 8},
            'supplier_delay': {'threshold': 0.1, 'cooldown_hours': 12},
            'price_anomaly': {'threshold': 0.15, 'cooldown_hours': 6}
        }

    def generate_alert(self, alert_type: str, entity_id: str, 
                      details: Dict, severity: str) -> Dict:
        """Build a new active, unacknowledged alert record."""
        alert = {
            # Timestamp suffix keeps ids unique down to one second.
            'alert_id': f"{alert_type}_{entity_id}_{datetime.now().strftime('%Y%m%d%H%M%S')}",
            'type': alert_type,
            'entity_id': entity_id,
            'severity': severity,
            'details': details,
            'timestamp': datetime.now(),
            'status': 'active',
            'acknowledged': False
        }

        return alert

    def process_demand_alerts(self, demand_df: pd.DataFrame, 
                            anomaly_detector: DemandAnomalyDetector) -> List[Dict]:
        """Raise a demand_spike alert for every detected spike across
        all products in demand_df."""
        alerts = []

        for product_id in demand_df['product_id'].unique():
            spikes = anomaly_detector.detect_demand_spikes(demand_df, product_id)

            for spike in spikes:
                if spike['severity'] in ['high', 'medium']:
                    alert = self.generate_alert(
                        'demand_spike',
                        product_id,
                        {
                            'spike_ratio': spike['spike_ratio'],
                            'actual_demand': spike['value'],
                            'expected_demand': spike['expected'],
                            'date': spike['date']
                        },
                        spike['severity']
                    )
                    alerts.append(alert)

        return alerts

    def process_inventory_alerts(self, inventory_df: pd.DataFrame,
                               replenishment_plan: pd.DataFrame) -> List[Dict]:
        """Raise stock alerts: critical at <=10% of the reorder point,
        low at <=30%."""
        alerts = []

        # Attach each product's reorder parameters to its stock row.
        merged_df = inventory_df.merge(
            replenishment_plan[['product_id', 'reorder_point', 'safety_stock']],
            on='product_id',
            how='left'
        )

        for _, row in merged_df.iterrows():
            product_id = row['product_id']
            current_stock = row['current_stock']
            # NOTE(review): after a left merge a missing plan row yields
            # NaN here, not this default (the key exists); NaN > 0 is
            # False, so such rows are skipped by the guard below anyway.
            reorder_point = row.get('reorder_point', 0)

            if reorder_point > 0:
                stock_ratio = current_stock / reorder_point

                if stock_ratio <= 0.1:
                    alert = self.generate_alert(
                        'inventory_critical',
                        product_id,
                        {
                            'current_stock': current_stock,
                            'reorder_point': reorder_point,
                            'stock_ratio': stock_ratio
                        },
                        'critical'
                    )
                    alerts.append(alert)
                elif stock_ratio <= 0.3:
                    alert = self.generate_alert(
                        'inventory_low',
                        product_id,
                        {
                            'current_stock': current_stock,
                            'reorder_point': reorder_point,
                            'stock_ratio': stock_ratio
                        },
                        'medium'
                    )
                    alerts.append(alert)

        return alerts

    def filter_duplicate_alerts(self, new_alerts: List[Dict]) -> List[Dict]:
        """Drop alerts that duplicate an active alert of the same type
        and entity within that type's cooldown window.

        Bug fix: uses timedelta.total_seconds(); the previous .seconds
        attribute ignores whole days, so an alert raised a day later
        was wrongly treated as inside a 4-hour cooldown.
        """
        filtered_alerts = []

        for new_alert in new_alerts:
            is_duplicate = False

            # Unknown alert types fall back to a 4-hour cooldown.
            cooldown_hours = self.alert_rules.get(
                new_alert['type'], {}).get('cooldown_hours', 4)

            for active_alert in self.active_alerts:
                age_seconds = (new_alert['timestamp'] -
                               active_alert['timestamp']).total_seconds()
                if (active_alert['type'] == new_alert['type'] and
                        active_alert['entity_id'] == new_alert['entity_id'] and
                        age_seconds < cooldown_hours * 3600):
                    is_duplicate = True
                    break

            if not is_duplicate:
                filtered_alerts.append(new_alert)

        return filtered_alerts

    def get_active_alerts_summary(self) -> Dict:
        """Summarize active alerts by count, severity and type."""
        summary = {
            'total_active': len(self.active_alerts),
            'by_severity': {
                'critical': len([a for a in self.active_alerts if a['severity'] == 'critical']),
                'high': len([a for a in self.active_alerts if a['severity'] == 'high']),
                'medium': len([a for a in self.active_alerts if a['severity'] == 'medium']),
                'low': len([a for a in self.active_alerts if a['severity'] == 'low'])
            },
            'by_type': {}
        }

        for alert in self.active_alerts:
            alert_type = alert['type']
            if alert_type not in summary['by_type']:
                summary['by_type'][alert_type] = 0
            summary['by_type'][alert_type] += 1

        return summary

class SupplyChainMonitoringDashboard:
    """Facade wiring the anomaly detector, risk analyzer and alert
    system together into one monitoring report."""

    def __init__(self):
        self.demand_detector = DemandAnomalyDetector()
        self.risk_analyzer = SupplyChainRiskAnalyzer()
        self.alert_system = RealTimeAlertSystem()

    def run_full_analysis(self, demand_df: pd.DataFrame,
                         inventory_df: pd.DataFrame,
                         supplier_df: Optional[pd.DataFrame] = None,
                         market_df: Optional[pd.DataFrame] = None,
                         replenishment_plan: Optional[pd.DataFrame] = None) -> Dict:
        """Run all analyses and return a combined report dict.

        Optional inputs (supplier_df, market_df, replenishment_plan)
        simply skip their analysis step when absent.
        """

        print("正在分析需求异常...")
        demand_alerts = self.alert_system.process_demand_alerts(
            demand_df, self.demand_detector
        )

        print("正在分析库存风险...")
        inventory_alerts = []
        if replenishment_plan is not None:
            inventory_alerts = self.alert_system.process_inventory_alerts(
                inventory_df, replenishment_plan
            )

        print("正在评估供应链风险...")
        supplier_risk = {}
        if supplier_df is not None:
            supplier_risk = self.risk_analyzer.assess_supplier_risk(supplier_df)

        inventory_risk = self.risk_analyzer.assess_inventory_risk(inventory_df, demand_df)

        market_risk = {}
        if market_df is not None:
            market_risk = self.risk_analyzer.assess_market_risk(market_df)

        # Deduplicate new alerts against active ones, then record them.
        all_alerts = demand_alerts + inventory_alerts
        filtered_alerts = self.alert_system.filter_duplicate_alerts(all_alerts)

        self.alert_system.active_alerts.extend(filtered_alerts)
        self.alert_system.alert_history.extend(filtered_alerts)

        # Record the run time; get_real_time_status reads this attribute
        # but nothing set it before.
        self._last_analysis_time = datetime.now()

        report = {
            'analysis_timestamp': self._last_analysis_time,
            'demand_anomalies': self._summarize_demand_anomalies(demand_df),
            'supply_chain_risks': {
                'supplier_risk': supplier_risk,
                'inventory_risk': inventory_risk,
                'market_risk': market_risk
            },
            'alerts': {
                'new_alerts': filtered_alerts,
                'active_alerts_summary': self.alert_system.get_active_alerts_summary(),
                'total_active_alerts': len(self.alert_system.active_alerts)
            },
            'recommendations': self._generate_recommendations(
                filtered_alerts, supplier_risk, inventory_risk
            )
        }

        return report

    def _summarize_demand_anomalies(self, demand_df: pd.DataFrame) -> Dict:
        """Tally ML anomalies and spikes per product by type and severity.

        Fix: 'severity_distribution' was declared but never populated.
        """
        summary = {
            'total_products_analyzed': len(demand_df['product_id'].unique()),
            'products_with_anomalies': 0,
            'anomaly_types': {},
            'severity_distribution': {}
        }

        for product_id in demand_df['product_id'].unique():
            features = self.demand_detector.extract_demand_features(demand_df, product_id)
            if len(features) > 0:
                ml_anomalies = self.demand_detector.detect_ml_anomalies(features)
                spikes = self.demand_detector.detect_demand_spikes(demand_df, product_id)

                total_anomalies = len(ml_anomalies) + len(spikes)
                if total_anomalies > 0:
                    summary['products_with_anomalies'] += 1

                    # Both anomaly kinds carry 'type' and 'severity'.
                    for anomaly in ml_anomalies + spikes:
                        anomaly_type = anomaly['type']
                        summary['anomaly_types'][anomaly_type] = \
                            summary['anomaly_types'].get(anomaly_type, 0) + 1
                        severity = anomaly['severity']
                        summary['severity_distribution'][severity] = \
                            summary['severity_distribution'].get(severity, 0) + 1

        return summary

    def _generate_recommendations(self, alerts: List[Dict], 
                                supplier_risk: Dict, 
                                inventory_risk: Dict) -> List[str]:
        """Turn alerts and risk scores into human-readable advice."""
        recommendations = []

        # Alert-driven advice. Fix: the message previously interpolated
        # the list object itself instead of its length.
        critical_alerts = [a for a in alerts if a['severity'] == 'critical']
        if critical_alerts:
            recommendations.append(
                f"紧急：发现{len(critical_alerts)}个关键预警，需要立即采取行动"
            )

        # Supplier-risk advice.
        high_risk_suppliers = [
            sid for sid, risk in supplier_risk.items() 
            if risk['risk_level'] in ['critical', 'high']
        ]
        if high_risk_suppliers:
            recommendations.append(
                f"供应商风险：{len(high_risk_suppliers)}个高风险供应商需要重点关注"
            )

        # Inventory-risk advice.
        high_risk_products = [
            pid for pid, risk in inventory_risk.items() 
            if risk['risk_level'] in ['critical', 'high']
        ]
        if high_risk_products:
            recommendations.append(
                f"库存风险：{len(high_risk_products)}个产品存在高库存风险"
            )

        # Always-on general advice.
        recommendations.extend([
            "建议建立更完善的监控机制",
            "考虑增加安全库存以应对需求波动",
            "定期评估和优化供应商网络"
        ])

        return recommendations

    def save_monitoring_report(self, report: Dict, filepath: str):
        """Write the report as UTF-8 JSON, converting datetimes to ISO
        strings first.

        NOTE(review): only datetime objects are converted; other
        non-JSON types (e.g. numpy scalars) would still raise in
        json.dump -- confirm report contents if that matters.
        """
        def serialize_datetime(obj):
            # Recursively walk dicts/lists, replacing datetimes.
            if isinstance(obj, datetime):
                return obj.isoformat()
            elif isinstance(obj, dict):
                return {k: serialize_datetime(v) for k, v in obj.items()}
            elif isinstance(obj, list):
                return [serialize_datetime(item) for item in obj]
            return obj

        report = serialize_datetime(report)

        with open(filepath, 'w', encoding='utf-8') as f:
            json.dump(report, f, ensure_ascii=False, indent=2)

    def get_real_time_status(self) -> Dict:
        """Snapshot of the monitoring system's current state."""
        return {
            'timestamp': datetime.now(),
            'active_alerts': len(self.alert_system.active_alerts),
            'system_status': 'operational',
            # Set by run_full_analysis; None before the first run.
            'last_analysis': getattr(self, '_last_analysis_time', None)
        }