import numpy as np
from typing import List, Dict, Any
from datetime import datetime
from sklearn.preprocessing import StandardScaler
from app.schemas.bank_risk_schema import (
    BankRiskOutputParams, TimeStepRiskResult, RiskLevel, TaskStatus, BankRiskInputParams
)
from app.tools.redis_client import redis_client
from app.utils.logger import logger
import json

class DateTimeEncoder(json.JSONEncoder):
    """JSON encoder that serializes ``datetime`` objects as ISO-8601 strings."""

    def default(self, obj):
        # Only datetimes get special treatment; everything else is deferred
        # to the base encoder, which raises TypeError for unserializable values.
        if not isinstance(obj, datetime):
            return super().default(obj)
        return obj.isoformat()

class BankRiskService:
    """Bank-transaction risk analysis service.

    Per time step it scores nodes, detects risk communities, flags alert
    nodes, and persists progress and results to Redis.
    """

    def __init__(self):
        # Scaler used by calculate_risk_score to standardize node features;
        # it is re-fitted on every call.
        self.scaler = StandardScaler()
    
    def calculate_risk_score(self, node_features: np.ndarray) -> np.ndarray:
        """计算节点风险分数"""
        try:
            # 检查特征是否全为零
            if np.all(node_features == 0):
                return np.zeros(node_features.shape[0])
            
            # 特征标准化
            # 添加小量值避免零方差
            epsilon = 1e-10
            node_features = node_features + epsilon
            normalized_features = self.scaler.fit_transform(node_features)
            
            # 使用加权求和计算风险分数
            weights = np.array([0.4, 0.3, 0.3])  # 各特征权重
            risk_score = np.sum(normalized_features * weights, axis=1)
            
            # 归一化到[0,1]区间
            score_range = risk_score.max() - risk_score.min()
            if score_range > epsilon:
                normalized_score = (risk_score - risk_score.min()) / score_range
            else:
                normalized_score = np.full_like(risk_score, 0.5)
            
            return normalized_score
        
        except Exception as e:
            logger.error(f"计算风险分数失败: {str(e)}")
            # 返回默认风险分数
            return np.full(node_features.shape[0], 0.5)
    
    def detect_risk_communities(
        self, 
        transaction_matrix: np.ndarray,
        risk_scores: np.ndarray,
        community_count: int
    ) -> List[int]:
        """检测风险社区"""
        try:
            N = len(risk_scores)
            
            # 检查是否有足够的节点
            if N < community_count:
                community_count = max(1, N)
            
            # 初始化社区矩阵
            X = np.random.rand(N, community_count)
            row_sums = X.sum(axis=1, keepdims=True)
            X = np.where(row_sums > 0, X / row_sums, 1.0 / community_count)
            
            # 迭代优化
            for _ in range(10):
                X_new = np.zeros_like(X)
                for i in range(N):
                    neighbors = transaction_matrix[i] > 0
                    if neighbors.any():
                        # 考虑邻居节点的风险分数
                        neighbor_influence = transaction_matrix[i, neighbors] * risk_scores[neighbors]
                        if neighbor_influence.sum() > 0:
                            X_new[i] = np.average(X[neighbors], weights=neighbor_influence, axis=0)
                        else:
                            X_new[i] = X[i]  # 保持原始分配
                    else:
                        X_new[i] = X[i]  # 孤立节点保持原始分配
                
                # 确保没有零行
                row_sums = X_new.sum(axis=1, keepdims=True)
                X = np.where(row_sums > 0, X_new / row_sums, 1.0 / community_count)
            
            return np.argmax(X, axis=1).tolist()
        
        except Exception as e:
            logger.error(f"风险社区检测失败: {str(e)}")
            # 返回默认社区分配
            return [0] * N
    
    def _identify_risk_patterns(self, results: List[TimeStepRiskResult]) -> List[Dict[str, Any]]:
        """Scan per-time-step results for concentrations of high-risk communities.

        For every time step, collects each community rated HIGH or CRITICAL
        together with its member nodes and mean risk score; a time step with
        at least one such community contributes one pattern entry. Returns an
        empty list on any internal failure.
        """
        try:
            patterns: List[Dict[str, Any]] = []

            for step in results:
                flagged = []
                for cid, level in enumerate(step.community_risk_levels):
                    if level not in (RiskLevel.HIGH, RiskLevel.CRITICAL):
                        continue
                    members = [
                        nid for nid, assigned in enumerate(step.risk_communities)
                        if assigned == cid
                    ]
                    member_scores = [step.risk_scores[nid] for nid in members]
                    mean_score = sum(member_scores) / len(member_scores) if member_scores else 0

                    flagged.append({
                        "community_id": cid,
                        "risk_level": level.value,  # enum value, JSON-safe
                        "nodes": members,
                        "average_risk_score": mean_score
                    })

                if flagged:
                    patterns.append({
                        "timestamp": step.timestamp,  # already an ISO string
                        "high_risk_communities": flagged,
                        "alert_nodes": step.alert_nodes,
                        "pattern_type": "community_risk_concentration"
                    })

            return patterns

        except Exception as e:
            logger.error(f"识别风险模式失败: {str(e)}")
            return []

    async def analyze_risk_patterns(
        self,
        input_params: BankRiskInputParams,
        task_id: str
    ) -> None:
        """Run the full per-time-step risk analysis and persist it to Redis.

        For each time step: builds the transaction matrix, extracts node
        features, scores nodes, detects communities, flags alert nodes, and
        pushes a progress record to Redis under ``{task_id}_result``. On
        success writes a COMPLETED record; on any failure writes a FAILED
        record and re-raises.

        Args:
            input_params: validated analysis request (nodes, edges, thresholds).
            task_id: key prefix for the Redis result record.

        Raises:
            ValueError: on empty or malformed input data.
        """
        try:
            results = []
            risk_evolution = []

            # Fail fast on structurally empty input.
            if not input_params.nodes_info:
                raise ValueError("节点信息列表为空")

            if not input_params.transaction_edges:
                raise ValueError("交易边列表为空")

            # Node IDs are used directly as matrix indices, so the matrix is
            # sized max_id + 1 (gaps in the ID space become all-zero rows).
            node_ids = {node.node_id for node in input_params.nodes_info}
            max_node_id = max(node_ids)
            node_count = max_node_id + 1

            # Never iterate past the transaction data we actually have.
            time_window = min(input_params.time_window, len(input_params.transaction_edges))

            for t in range(time_window):
                # Validate this time step's edges before using them as indices.
                current_edges = input_params.transaction_edges[t]
                for edge in current_edges:
                    if len(edge) != 3:
                        raise ValueError(f"无效的边数据格式: {edge}")
                    from_node, to_node, _ = edge
                    # Reject negative IDs too: they passed the old upper-bound
                    # check and would wrap via numpy's negative indexing when
                    # the matrix is built.
                    if not (0 <= from_node <= max_node_id and 0 <= to_node <= max_node_id):
                        raise ValueError(f"边 {edge} 中的节点ID超出范围")

                # Build this time step's transaction network.
                transaction_matrix = self._build_transaction_matrix(
                    current_edges,
                    node_count
                )

                # Extract per-node features.
                node_features = self._extract_node_features(
                    input_params.nodes_info,
                    transaction_matrix,
                    node_count
                )

                # Per-node risk scores in [0, 1].
                risk_scores = self.calculate_risk_score(node_features)

                # Community assignment for every node.
                risk_communities = self.detect_risk_communities(
                    transaction_matrix,
                    risk_scores,
                    input_params.community_count
                )

                # Nodes above the configured alert threshold.
                alert_nodes = np.where(risk_scores > input_params.risk_threshold)[0].tolist()

                # Per-community risk levels.
                community_risk_levels = self._evaluate_community_risk(
                    risk_communities,
                    risk_scores
                )

                # Store ISO-format strings so the record is JSON-serializable.
                current_time = datetime.now().isoformat()
                time_step_result = {
                    "timestamp": current_time,
                    "risk_communities": risk_communities,
                    "risk_scores": risk_scores.tolist(),
                    "alert_nodes": alert_nodes,
                    "community_risk_levels": [level.value for level in community_risk_levels]
                }
                results.append(time_step_result)

                risk_evolution.append(float(risk_scores.mean()))

                # Publish progress after each completed time step.
                progress_data = {
                    "task_id": task_id,
                    "task_status": TaskStatus.RUNNING.value,  # enum value, JSON-safe
                    "task_progress": int((t + 1) / time_window * 100),
                    "input_params": self._convert_input_params(input_params)
                }
                await redis_client.update_data(f"{task_id}_result", progress_data)

            # Assemble the final output payload.
            output_params = {
                "results": results,
                "overall_risk_score": float(np.mean(risk_evolution)),
                "high_risk_patterns": self._identify_risk_patterns([
                    TimeStepRiskResult(**result) for result in results
                ]),
                "risk_evolution": risk_evolution,
                "algorithm_parameters": self._convert_input_params(input_params)
            }

            # Final COMPLETED record.
            result_dict = {
                "task_id": task_id,
                "task_status": TaskStatus.COMPLETED.value,  # enum value, JSON-safe
                "task_progress": 100,
                "output_params": output_params,
                "input_params": self._convert_input_params(input_params),
                "metrics": []
            }

            await redis_client.update_data(f"{task_id}_result", result_dict)

        except Exception as e:
            logger.error(f"风险分析失败: {str(e)}")
            # Persist the failure so the task's consumer sees FAILED, then
            # propagate to the caller.
            error_status = {
                "task_id": task_id,
                "task_status": TaskStatus.FAILED.value,  # enum value, JSON-safe
                "error_message": str(e),
                "input_params": self._convert_input_params(input_params),
                "metrics": []
            }
            await redis_client.update_data(f"{task_id}_result", error_status)
            raise
    
    def _build_transaction_matrix(self, edges: List[List], node_count: int) -> np.ndarray:
        """构建交易邻接矩阵"""
        matrix = np.zeros((node_count, node_count))
        for edge in edges:
            from_node, to_node, amount = edge
            # 添加索引检查
            if from_node < node_count and to_node < node_count:
                matrix[from_node][to_node] = amount
            else:
                logger.warning(f"跳过无效边: {edge}, 节点数量: {node_count}")
        return matrix
    
    def _extract_node_features(
        self,
        nodes_info: List,
        transaction_matrix: np.ndarray,
        node_count: int
    ) -> np.ndarray:
        """提取节点特征"""
        features = np.zeros((node_count, 3))  # 初始化特征矩阵
        
        for node in nodes_info:
            node_id = node.node_id
            if 0 <= node_id < node_count:
                # 交易量特征
                transaction_volume = transaction_matrix[node_id].sum()
                # 交易频率特征
                transaction_frequency = (transaction_matrix[node_id] > 0).sum()
                # 账户类型风险特征
                account_risk = self._get_account_type_risk(node.account_type)
                
                features[node_id] = [transaction_volume, transaction_frequency, account_risk]
        
        return features
    
    def _get_account_type_risk(self, account_type: str) -> float:
        """计算账户类型风险分数"""
        # 添加账户类型风险映射
        risk_mapping = {
            "个人账户": 0.3,
            "企业账户": 0.5,
            "离岸账户": 0.8,
            "临时账户": 0.9
        }
        return risk_mapping.get(account_type, 0.5)  # 默认风险分数为0.5
    
    def _evaluate_community_risk(
        self,
        communities: List[int],
        risk_scores: np.ndarray
    ) -> List[RiskLevel]:
        """Rate each community by the mean risk score of its member nodes.

        Returns one RiskLevel per community ID from 0 through
        max(communities), so the result can safely be indexed by community ID
        (callers enumerate it that way). The old code iterated the *set* of
        observed IDs, so a gap in the ID space shifted every later level onto
        the wrong community. Communities with no members are rated LOW.
        """
        if not communities:
            return []

        membership = np.asarray(communities)
        community_risks: List[RiskLevel] = []

        for comm in range(max(communities) + 1):
            members = membership == comm
            # An empty community carries no signal -> lowest rating.
            avg_risk = risk_scores[members].mean() if members.any() else 0.0

            if avg_risk > 0.8:
                risk_level = RiskLevel.CRITICAL
            elif avg_risk > 0.6:
                risk_level = RiskLevel.HIGH
            elif avg_risk > 0.4:
                risk_level = RiskLevel.MEDIUM
            else:
                risk_level = RiskLevel.LOW

            community_risks.append(risk_level)

        return community_risks
    
    async def _update_progress(self, task_id: str, progress: float) -> None:
        """Write a RUNNING progress record for the task to Redis.

        Uses the enum's ``.value`` (not the enum member) so the payload stays
        JSON-serializable, matching every other Redis write in this service —
        the old code passed the raw enum member here.
        """
        await redis_client.update_data(
            f"{task_id}_result",
            {
                "task_id": task_id,
                "task_status": TaskStatus.RUNNING.value,  # enum value, JSON-safe
                "task_progress": int(progress)
            }
        )

    def _convert_input_params(self, input_params: BankRiskInputParams) -> dict:
        """转换输入参数为可JSON序列化的字典"""
        input_dict = input_params.dict()
        # 处理 nodes_info 中的 datetime
        for node in input_dict["nodes_info"]:
            if isinstance(node["timestamp"], datetime):
                node["timestamp"] = node["timestamp"].isoformat()
        return input_dict 