import pandas as pd
import numpy as np
import networkx as nx
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import roc_auc_score
import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import defaultdict, Counter
import json
import logging
from models import CompleteNetworkGNNModel
from model_manager import ModelManager
from data_processor import DataProcessor
from graph_builder import GraphBuilder

logger = logging.getLogger(__name__)

class CompleteNetworkDetector(DataProcessor, GraphBuilder):
    """Fraud-network detector combining tabular preprocessing (DataProcessor),
    graph construction (GraphBuilder) and a GNN-based risk model over the
    resulting heterogeneous graph.
    """

    def __init__(self, model_dir='models'):
        """Initialize empty detector state.

        Args:
            model_dir: directory handed to ModelManager for model persistence.
        """
        # Explicit mixin initialization (cooperative super() is not used here).
        DataProcessor.__init__(self)
        GraphBuilder.__init__(self)
        # Pipeline results, filled in by later stages.
        self.communities = {}
        self.evidence_chains = {}
        self.risk_scores = {}
        # Model artifacts.
        self.gnn_model = None
        self.is_trained = False
        self.feature_scaler = StandardScaler()
        self.model_manager = ModelManager(model_dir)

    def load_and_build_complete_graph(self, employ_file, policy_file, claim_file, visit_file, institution_file):
        """Load the five CSV inputs and build the complete graph network.

        Args:
            employ_file, policy_file, claim_file, visit_file, institution_file:
                paths to the respective CSV data files.
        """
        logger.info("加载数据并构建完整图网络...")
        # Table-driven loading keeps attribute/path pairing in one place.
        sources = {
            'employ_df': employ_file,
            'policy_df': policy_file,
            'claim_df': claim_file,
            'visit_df': visit_file,
            'institution_df': institution_file,
        }
        for attr, path in sources.items():
            setattr(self, attr, pd.read_csv(path))
        self._preprocess_data()
        self._build_complete_graph()
        logger.info(f"完整图构建完成: {self.graph.number_of_nodes()} 节点, {self.graph.number_of_edges()} 边")

    def _preprocess_data(self):
        """Fill missing monetary columns with 0, then run the shared
        date-conversion and feature-creation steps from the mixins."""
        for frame, column in ((self.claim_df, '赔付金额（元）'),
                              (self.visit_df, '总费用（元）')):
            frame[column] = frame[column].fillna(0)
        self._convert_date_columns()
        self._create_complete_features()

    def _build_complete_graph(self):
        """Reset graph state and populate nodes, edges and node features."""
        self.graph = nx.Graph()
        self.node_features, self.node_labels = {}, {}
        # The three population steps are delegated to GraphBuilder helpers.
        self._add_all_nodes()
        self._add_complete_edges()
        self._create_complete_features_for_nodes()
        logger.info("完整图网络构建完成")

    def prepare_training_data(self):
        """Assemble the tensors needed to train/evaluate the GNN.

        Returns:
            Tuple ``(X, y, edge_index, node_ids)`` where X is the scaled
            feature matrix, y the 0/1 node labels, edge_index a (2, E) long
            tensor with both edge directions, and node_ids the ordering used.
            Returns ``(None, None, None, None)`` when no node features exist.
        """
        logger.info("准备训练数据...")
        if not self.node_features:
            logger.error("没有节点特征数据")
            return None, None, None, None
        node_ids = list(self.node_features.keys())
        index_of = {nid: i for i, nid in enumerate(node_ids)}
        X = np.array([self.node_features[nid] for nid in node_ids])
        # Nodes without an explicit label default to 0 (normal).
        y = np.array([self.node_labels.get(nid, 0) for nid in node_ids])
        # Undirected graph -> emit both directions for GNN message passing.
        pairs = []
        for src, dst in self.graph.edges():
            if src in index_of and dst in index_of:
                i, j = index_of[src], index_of[dst]
                pairs.extend(([i, j], [j, i]))
        if pairs:
            edge_index = torch.tensor(pairs, dtype=torch.long).t().contiguous()
        else:
            edge_index = torch.tensor([[], []], dtype=torch.long)
        # NOTE(review): fit_transform also runs when this method is called from
        # predict_network_risks, re-fitting the scaler at inference time. This
        # is harmless while predicting on the same graph it was trained on,
        # but should be revisited before scoring unseen data.
        X_scaled = self.feature_scaler.fit_transform(X) if len(X) > 0 else X
        logger.info(f"训练数据准备完成: {X_scaled.shape[0]} 样本, {edge_index.shape[1]} 边")
        logger.info(f"标签分布: 正常 {np.sum(y == 0)}, 可疑 {np.sum(y == 1)}")
        return (torch.tensor(X_scaled, dtype=torch.float),
                torch.tensor(y, dtype=torch.float),
                edge_index,
                node_ids)

    def train_complete_model(self, epochs=100, lr=0.001):
        """Train the complete-network GNN with full-batch gradient descent.

        Args:
            epochs: number of training epochs.
            lr: initial learning rate for AdamW (cosine-annealed).

        Returns:
            List of per-epoch training losses, or None when no valid training
            data could be prepared.
        """
        logger.info("开始训练完整网络GNN模型...")
        X, y, edge_index, node_ids = self.prepare_training_data()
        if X is None or edge_index.shape[1] == 0:
            logger.error("无法准备有效的训练数据")
            return
        input_dim = X.shape[1]
        self.gnn_model = CompleteNetworkGNNModel(
            input_dim=input_dim,
            hidden_dims=[256, 128, 64],
            output_dim=1,
            dropout=0.3
        )
        # Class-imbalance weighting; clamping the positive count avoids a
        # division by zero (inf/nan pos_weight) when no positive labels exist.
        pos_count = (y == 1).sum().clamp(min=1)
        pos_weight = torch.tensor([(y == 0).sum() / pos_count])
        criterion = nn.BCEWithLogitsLoss(pos_weight=pos_weight)
        optimizer = torch.optim.AdamW(self.gnn_model.parameters(), lr=lr, weight_decay=1e-4)
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=epochs)
        train_losses = []
        train_accuracies = []
        for epoch in range(epochs):
            self.gnn_model.train()
            optimizer.zero_grad()
            outputs = self.gnn_model(X, edge_index)
            loss = criterion(outputs.squeeze(), y)
            loss.backward()
            torch.nn.utils.clip_grad_norm_(self.gnn_model.parameters(), max_norm=1.0)
            optimizer.step()
            scheduler.step()
            with torch.no_grad():
                # The model emits raw logits (paired with BCEWithLogitsLoss),
                # so map through sigmoid before applying the 0.5 cut-off —
                # thresholding logits at 0.5 mis-measures accuracy.
                probs = torch.sigmoid(outputs.squeeze())
                preds = (probs > 0.5).float()
                accuracy = (preds == y).float().mean()
                try:
                    # AUC is rank-based, so probs and logits give the same
                    # value; probs is used for consistency with reporting.
                    auc_score = roc_auc_score(y.numpy(), probs.numpy())
                except ValueError:
                    # Only one class present in y — AUC is undefined.
                    auc_score = 0.5
            train_losses.append(loss.item())
            train_accuracies.append(accuracy.item())
            if epoch % 20 == 0 or epoch == epochs - 1:
                # Report probabilities so the logged "risk range" lies in [0, 1].
                risk_scores = probs.numpy()
                logger.info(f'Epoch {epoch}/{epochs}, Loss: {loss.item():.4f}, '
                            f'Accuracy: {accuracy.item():.4f}, AUC: {auc_score:.4f}')
                logger.info(f'  风险范围: [{risk_scores.min():.3f}, {risk_scores.max():.3f}], '
                            f'标准差: {risk_scores.std():.3f}')
        self.is_trained = True
        logger.info("完整网络GNN模型训练完成!")
        return train_losses

    def predict_network_risks(self):
        """Score every node with the trained GNN and attach the score to the graph.

        Returns:
            Dict mapping node id -> risk probability in [0, 1], or None when
            the model is untrained or no data is available.
        """
        logger.info("预测节点网络风险...")
        if not self.is_trained or self.gnn_model is None:
            logger.error("GNN模型未训练")
            return None
        X, _, edge_index, node_ids = self.prepare_training_data()
        if X is None:
            return None
        self.gnn_model.eval()
        with torch.no_grad():
            # The model was trained with BCEWithLogitsLoss, so its outputs are
            # raw logits. Apply sigmoid so downstream consumers (0.7 high-risk
            # cut-offs, default 0.3, min(..., 1.0) caps) receive probabilities
            # in [0, 1] as they assume — previously raw logits leaked through.
            predictions = torch.sigmoid(self.gnn_model(X, edge_index))
        node_predictions = {}
        for i, node_id in enumerate(node_ids):
            risk_score = predictions[i].item()
            node_predictions[node_id] = risk_score
            if node_id in self.graph:
                self.graph.nodes[node_id]['network_risk'] = risk_score
        risk_values = list(node_predictions.values())
        logger.info(f"网络风险预测完成: 平均 {np.mean(risk_values):.3f}, "
                    f"最大 {max(risk_values):.3f}, 最小 {min(risk_values):.3f}, "
                    f"标准差 {np.std(risk_values):.3f}")
        return node_predictions

    def detect_complete_communities(self, min_community_size=5):
        """Partition the graph into communities and keep the "complete" ones.

        A community is retained when it contains at least
        ``min_community_size`` nodes and covers at least
        ``self.min_key_types_in_community`` key node types. Falls back to
        connected components when python-louvain is unavailable.
        """
        logger.info("检测完整社区...")
        has_risk = any('network_risk' in self.graph.nodes[n] for n in self.graph.nodes())
        if not has_risk:
            logger.warning("没有网络风险分数，先进行风险预测")
            self.predict_network_risks()
        try:
            from community import community_louvain
        except ImportError:
            logger.warning("community_louvain未安装，使用基础社区检测")
            self._fallback_community_detection(min_community_size)
            return
        # Weight each edge by the product of its endpoints' risk so Louvain
        # prefers grouping risky nodes together (0.3 when a score is missing).
        weighted_graph = self.graph.copy()
        for u, v in weighted_graph.edges():
            u_risk = weighted_graph.nodes[u].get('network_risk', 0.3)
            v_risk = weighted_graph.nodes[v].get('network_risk', 0.3)
            weighted_graph[u][v]['weight'] = u_risk * v_risk
        partition = community_louvain.best_partition(weighted_graph, weight='weight', resolution=1.2)
        grouped = defaultdict(list)
        for node, community_id in partition.items():
            grouped[community_id].append(node)
        self.communities = {}
        for comm_id, members in grouped.items():
            if len(members) < min_community_size:
                continue
            counts = Counter(self.graph.nodes[n].get('type') for n in members)
            present = sum(1 for kt in self.key_node_types if counts.get(kt, 0) > 0)
            if present >= self.min_key_types_in_community:
                self.communities[comm_id] = members
        logger.info(f"发现 {len(self.communities)} 个完整社区 "
                    f"(至少包含 {self.min_key_types_in_community} 种关键节点类型)")

    def _fallback_community_detection(self, min_community_size):
        """Fallback partitioning: each sufficiently large connected component
        that covers enough key node types becomes a community."""
        self.communities = {}
        for i, component in enumerate(nx.connected_components(self.graph)):
            if len(component) < min_community_size:
                continue
            counts = Counter(self.graph.nodes[n].get('type') for n in component)
            present = sum(1 for kt in self.key_node_types if counts.get(kt, 0) > 0)
            if present >= self.min_key_types_in_community:
                self.communities[f"comp_{i}"] = list(component)

    def calculate_community_risk_with_completeness(self):
        """Score each detected community and store results in ``self.risk_scores``.

        The total risk blends node-level GNN risk statistics with structural
        signals (key-type completeness, key-node density, subgraph density and
        edge volume), then applies a contrast transform that pushes scores
        above 0.6 upward and scores below it downward.
        """
        logger.info("计算完整社区风险...")
        if not self.communities:
            logger.warning("没有检测到社区")
            return
        community_risk_scores = {}
        for comm_id, nodes in self.communities.items():
            # Per-node risk written by predict_network_risks (0 when missing).
            risks = [self.graph.nodes[node].get('network_risk', 0) for node in nodes]
            node_types = [self.graph.nodes[node].get('type') for node in nodes]
            type_counter = Counter(node_types)
            if not risks:
                continue
            avg_risk = np.mean(risks)
            max_risk = np.max(risks)
            min_risk = np.min(risks)
            # Nodes above the 0.7 cut-off count as high risk.
            high_risk_count = len([r for r in risks if r > 0.7])
            high_risk_ratio = high_risk_count / len(risks)
            completeness_score = self._calculate_completeness_score(type_counter)
            key_node_density = self._calculate_key_node_density(nodes, type_counter)
            subgraph = self.graph.subgraph(nodes)
            density = nx.density(subgraph)
            community_edges = self._extract_community_edges(nodes)
            # Weighted blend of risk and structure signals; weights sum to 1.
            # The last term normalizes edge count by twice the node count.
            base_risk = (
                    0.20 * avg_risk +
                    0.25 * max_risk +
                    0.20 * high_risk_ratio +
                    0.15 * completeness_score +
                    0.10 * key_node_density +
                    0.05 * min(density * 3, 1.0) +
                    0.05 * (len(community_edges) / (len(nodes) * 2))
            )
            # Contrast transform: exponent < 1 boosts already-high scores,
            # exponent > 1 suppresses low ones; result capped at 1.0.
            if base_risk > 0.6:
                community_risk = base_risk ** 0.8
            else:
                community_risk = base_risk ** 1.2
            community_risk = min(community_risk, 1.0)
            community_risk_scores[comm_id] = {
                'total_risk': community_risk,
                'avg_risk': avg_risk,
                'max_risk': max_risk,
                'min_risk': min_risk,
                'high_risk_ratio': high_risk_ratio,
                'completeness_score': completeness_score,
                'key_node_density': key_node_density,
                'density': density,
                'node_count': len(nodes),
                'type_distribution': dict(type_counter),
                'high_risk_nodes': high_risk_count,
                'nodes': nodes,
                'edges': community_edges
            }
        self.risk_scores = community_risk_scores
        if community_risk_scores:
            risks = [score['total_risk'] for score in community_risk_scores.values()]
            risk_array = np.array(risks)
            logger.info(f"完整社区风险统计:")
            logger.info(f"  范围: [{risk_array.min():.3f}, {risk_array.max():.3f}]")
            logger.info(f"  均值: {risk_array.mean():.3f}, 标准差: {risk_array.std():.3f}")
            # Communities covering (nearly) all key node types.
            complete_communities = [comm for comm, score in community_risk_scores.items()
                                    if score['completeness_score'] > 0.8]
            logger.info(f"  完整关键节点社区: {len(complete_communities)} 个")

    def _calculate_completeness_score(self, type_counter):
        """Fraction of key node types present in ``type_counter`` (0..1)."""
        present = sum(1 for key_type in self.key_node_types
                      if type_counter.get(key_type, 0) > 0)
        return present / len(self.key_node_types)

    def _calculate_key_node_density(self, nodes, type_counter):
        """Share of the community's nodes that belong to a key node type."""
        if not nodes:
            return 0
        key_total = sum(type_counter.get(t, 0) for t in self.key_node_types)
        return key_total / len(nodes)

    def _extract_community_edges(self, nodes):
        """Describe every intra-community edge (endpoints, types, risks, weight)."""
        subgraph = self.graph.subgraph(nodes)
        attrs = self.graph.nodes
        return [
            {
                'source': u,
                'target': v,
                'relationship': data.get('relationship', 'unknown'),
                'weight': data.get('weight', 1.0),
                'source_type': attrs[u].get('type', 'unknown'),
                'target_type': attrs[v].get('type', 'unknown'),
                'source_risk': attrs[u].get('network_risk', 0),
                'target_risk': attrs[v].get('network_risk', 0),
            }
            for u, v, data in subgraph.edges(data=True)
        ]

    def build_complete_evidence_chains(self, top_k=20):
        """Build evidence-chain records for the ``top_k`` riskiest communities.

        Args:
            top_k: maximum number of communities to turn into evidence chains.
        """
        logger.info("构建完整证据链...")
        if not self.risk_scores:
            logger.warning("没有风险分数数据")
            return
        # Rank by total risk first, completeness second (both descending).
        ranked = sorted(self.risk_scores.items(),
                        key=lambda item: (item[1]['total_risk'], item[1]['completeness_score']),
                        reverse=True)
        for comm_id, info in ranked[:top_k]:
            self.evidence_chains[comm_id] = {
                'community_id': comm_id,
                'risk_score': info['total_risk'],
                'completeness_score': info['completeness_score'],
                'risk_breakdown': {
                    'avg_risk': info['avg_risk'],
                    'max_risk': info['max_risk'],
                    'min_risk': info['min_risk'],
                    'high_risk_ratio': info['high_risk_ratio'],
                    'key_node_density': info['key_node_density'],
                    'density': info['density']
                },
                'type_distribution': info['type_distribution'],
                'key_entities': self._extract_key_entities(info['nodes']),
                'suspicious_patterns': self._identify_complete_patterns(info['nodes'],
                                                                        info['type_distribution']),
                'network_metrics': self._calculate_network_metrics(info['nodes']),
                'edges': info['edges'],
                'edge_summary': self._summarize_edges(info['edges'])
            }
        logger.info(f"构建了 {len(self.evidence_chains)} 个完整证据链")

    def _extract_key_entities(self, nodes):
        """Collect per-type details for every node whose risk exceeds 0.7.

        Returns:
            Dict with four buckets (employees, institutions, visits, claims);
            nodes of other types are ignored even when high-risk.
        """
        # Maps node type -> (result bucket, extra attributes with defaults).
        type_specs = {
            'employee': ('high_risk_employees',
                         (('position', ''), ('department', ''), ('violation_intensity', 0))),
            'institution': ('high_risk_institutions',
                            (('inst_type', ''), ('level', ''), ('bad_record_score', 0))),
            'visit': ('high_risk_visits',
                      (('total_cost', 0), ('duration', 0), ('cost_anomaly', 0))),
            'claim': ('suspicious_claims',
                      (('amount', 0), ('is_fraud', 0), ('delay_risk', 0))),
        }
        key_entities = {
            'high_risk_employees': [],
            'high_risk_institutions': [],
            'high_risk_visits': [],
            'suspicious_claims': []
        }
        for node in nodes:
            node_data = self.graph.nodes[node]
            risk = node_data.get('network_risk', 0)
            if risk <= 0.7:
                continue
            node_type = node_data.get('type', '')
            spec = type_specs.get(node_type)
            if spec is None:
                continue
            bucket, extras = spec
            entity_info = {
                'node_id': node,
                'risk_score': risk,
                'original_id': node_data.get('original_id', ''),
                'type': node_type
            }
            for attr, default in extras:
                entity_info[attr] = node_data.get(attr, default)
            key_entities[bucket].append(entity_info)
        return key_entities

    def _identify_complete_patterns(self, nodes, type_distribution):
        """Name the suspicious structural patterns found in a community.

        Args:
            nodes: node ids belonging to the community.
            type_distribution: mapping of node type -> count for those nodes.

        Returns:
            List of human-readable pattern descriptions (possibly empty).
        """
        patterns = []
        # The original built self.graph.subgraph(nodes) here but never used
        # it; that dead computation has been removed.
        completeness = self._calculate_completeness_score(type_distribution)
        if completeness > 0.9:
            patterns.append("完整欺诈网络 (包含所有关键节点类型)")
        elif completeness > 0.6:
            patterns.append("部分完整欺诈网络")
        emp_count = type_distribution.get('employee', 0)
        inst_count = type_distribution.get('institution', 0)
        visit_count = type_distribution.get('visit', 0)
        if emp_count > 0 and inst_count > 0 and visit_count > 0:
            patterns.append("员工-机构-就诊完整链条")
        elif emp_count > 0 and inst_count > 0:
            patterns.append("员工-机构关联模式")
        elif emp_count > 0 and visit_count > 0:
            patterns.append("员工-就诊关联模式")
        risks = [self.graph.nodes[node].get('network_risk', 0) for node in nodes]
        # Guard against an empty community (previously a ZeroDivisionError).
        if risks:
            high_risk_ratio = sum(1 for r in risks if r > 0.7) / len(risks)
            if high_risk_ratio > 0.5:
                patterns.append(f"高风险节点集中 ({high_risk_ratio:.1%})")
        return patterns

    def _calculate_network_metrics(self, nodes):
        """Compute basic topology metrics for the community's induced subgraph.

        Returns:
            Dict with node/edge counts, density, average degree and the
            average clustering coefficient (0 when undefined or on failure).
        """
        subgraph = self.graph.subgraph(nodes)
        metrics = {
            "node_count": len(nodes),
            "edge_count": subgraph.number_of_edges(),
            "density": nx.density(subgraph),
            "average_degree": sum(dict(subgraph.degree()).values()) / len(nodes) if nodes else 0
        }
        # Clustering is meaningless for a single node. The original bare
        # ``except:`` also swallowed KeyboardInterrupt/SystemExit; narrow it
        # to the failures clustering can actually produce.
        try:
            metrics["clustering_coefficient"] = (
                nx.average_clustering(subgraph) if len(nodes) > 1 else 0
            )
        except (nx.NetworkXError, ZeroDivisionError):
            metrics["clustering_coefficient"] = 0
        return metrics

    def _summarize_edges(self, edges):
        """Aggregate edge records into distribution and weight statistics.

        Returns:
            Empty dict for no edges, otherwise counts per relationship type,
            counts per source-target type pair, and weight statistics.
        """
        if not edges:
            return {}
        relationship_counts = Counter(e.get('relationship', 'unknown') for e in edges)
        type_connections = Counter(
            f"{e.get('source_type', 'unknown')}-{e.get('target_type', 'unknown')}"
            for e in edges
        )
        weights = [e.get('weight', 1.0) for e in edges]
        return {
            'total_edges': len(edges),
            'relationship_distribution': dict(relationship_counts),
            'type_connections': dict(type_connections),
            'avg_weight': np.mean(weights) if weights else 0,
            'max_weight': max(weights) if weights else 0,
            'min_weight': min(weights) if weights else 0
        }

    def generate_complete_report(self, output_file='complete_network_report.json'):
        """Write the full detection report as JSON and return it as a dict.

        Lazily computes community risks and evidence chains when missing,
        derives data-driven high/medium thresholds from the 75th/45th
        percentiles of community risk (falling back to 0.7/0.4), then buckets
        every evidence chain into high/medium/low risk groups.

        Args:
            output_file: path of the JSON report to write.

        Returns:
            The report dict that was serialized.
        """
        logger.info("生成完整网络检测报告...")
        if not self.risk_scores:
            self.calculate_community_risk_with_completeness()
        if not self.evidence_chains:
            self.build_complete_evidence_chains()
        if self.risk_scores:
            # Adaptive thresholds relative to this run's risk distribution.
            risk_values = [score['total_risk'] for score in self.risk_scores.values()]
            high_risk_threshold = np.percentile(risk_values, 75)
            medium_risk_threshold = np.percentile(risk_values, 45)
        else:
            # Static defaults when no communities were scored.
            high_risk_threshold = 0.7
            medium_risk_threshold = 0.4
        if self.risk_scores:
            high_risk_count = len([x for x in self.risk_scores.values()
                                   if x['total_risk'] > high_risk_threshold])
            medium_risk_count = len([x for x in self.risk_scores.values()
                                     if high_risk_threshold >= x['total_risk'] > medium_risk_threshold])
            low_risk_count = len(self.risk_scores) - high_risk_count - medium_risk_count
            # High-risk communities that also cover (nearly) all key types.
            complete_networks = len([x for x in self.risk_scores.values()
                                     if x['completeness_score'] > 0.8 and x['total_risk'] > high_risk_threshold])
        else:
            high_risk_count = medium_risk_count = low_risk_count = complete_networks = 0
        report = {
            'summary': {
                'total_communities': len(self.communities) if self.communities else 0,
                'high_risk_communities': high_risk_count,
                'medium_risk_communities': medium_risk_count,
                'low_risk_communities': low_risk_count,
                'complete_high_risk_networks': complete_networks,
                'total_nodes_analyzed': self.graph.number_of_nodes(),
                'total_edges_analyzed': self.graph.number_of_edges(),
                'model_trained': self.is_trained,
                'risk_threshold_high': high_risk_threshold,
                'risk_threshold_medium': medium_risk_threshold,
                'threshold_difference': high_risk_threshold - medium_risk_threshold,
                'detection_method': 'Complete Network GNN',
                'key_node_types': self.key_node_types
            },
            'risk_distribution': self._get_complete_risk_distribution(),
            'network_completeness_analysis': self._get_completeness_analysis(),
            'high_risk_groups': {},
            'medium_risk_groups': {},
            'low_risk_groups': {},
            'recommendations': self._generate_complete_recommendations(high_risk_count, complete_networks)
        }
        # Bucket each evidence chain by its risk relative to the thresholds.
        for comm_id, evidence in self.evidence_chains.items():
            risk_score = evidence['risk_score']
            if risk_score > high_risk_threshold:
                report['high_risk_groups'][comm_id] = evidence
            elif risk_score > medium_risk_threshold:
                report['medium_risk_groups'][comm_id] = evidence
            else:
                report['low_risk_groups'][comm_id] = evidence
        with open(output_file, 'w', encoding='utf-8') as f:
            json.dump(report, f, indent=2, ensure_ascii=False)
        logger.info(f"完整网络报告已生成: {output_file}")
        return report

    def _get_complete_risk_distribution(self):
        """Summary statistics over community risk and completeness scores."""
        if not self.risk_scores:
            return {}
        scores = self.risk_scores.values()
        risk_values = [s['total_risk'] for s in scores]
        completeness_scores = [s['completeness_score'] for s in scores]
        high_risk = sum(1 for r in risk_values if r > 0.7)
        complete = sum(1 for c in completeness_scores if c > 0.8)
        return {
            'risk_mean': np.mean(risk_values),
            'risk_std': np.std(risk_values),
            'risk_max': max(risk_values),
            'risk_min': min(risk_values),
            'completeness_mean': np.mean(completeness_scores) if completeness_scores else 0,
            'completeness_std': np.std(completeness_scores) if completeness_scores else 0,
            'high_risk_ratio': high_risk / len(risk_values) if risk_values else 0,
            'complete_network_ratio': complete / len(completeness_scores) if completeness_scores else 0
        }

    def _get_completeness_analysis(self):
        """Average per-community density of each key node type."""
        if not self.risk_scores:
            return {}
        densities = defaultdict(list)
        for score in self.risk_scores.values():
            type_dist = score['type_distribution']
            total = score['node_count']
            for node_type in self.key_node_types:
                if node_type in type_dist:
                    densities[node_type].append(type_dist[node_type] / total)
        return {
            f'avg_{node_type}_density': np.mean(values)
            for node_type, values in densities.items() if values
        }

    def _generate_complete_recommendations(self, high_risk_count, complete_networks):
        """Assemble investigation recommendations from the detection results."""
        recommendations = []
        if complete_networks > 0:
            recommendations += [
                f"发现 {complete_networks} 个完整的高风险欺诈网络",
                "立即对完整欺诈网络进行深度调查",
                "重点关注员工-机构-就诊的完整关联链条",
            ]
        if high_risk_count > 0:
            recommendations.append(f"共发现 {high_risk_count} 个高风险团伙")
        # Generic recommendations appended for every report.
        recommendations += [
            "利用网络完整性指标优化调查优先级",
            "建立基于完整网络模式的预警机制",
            "对关键节点类型密度高的网络进行重点监控",
            "结合边关系分析识别核心连接路径",
            "定期评估网络检测模型的完整性指标",
        ]
        return recommendations

    def save_model(self, model_name='complete_network_detector'):
        """Persist the trained GNN and its scaler via the model manager.

        Returns:
            Whatever ModelManager.save_model returns, or (None, None) when
            there is no trained model to save.
        """
        if self.is_trained and self.gnn_model is not None:
            return self.model_manager.save_model(self.gnn_model, self.feature_scaler, model_name)
        logger.warning("没有训练好的模型可保存")
        return None, None