import pandas as pd
import numpy as np
import networkx as nx
from collections import defaultdict, Counter
import logging

logger = logging.getLogger(__name__)

class DataProcessor:
    """Feature engineering and graph container for insurance risk data.

    Holds the heterogeneous graph plus per-node feature/label stores; the
    data frames this class operates on (claims, policies, employees,
    institutions, visits) are attached elsewhere.
    """

    def __init__(self):
        """Set up an empty graph and the feature-engineering defaults."""
        self.graph = nx.Graph()
        # Per-node feature vectors and labels, keyed by node id.
        self.node_features, self.node_labels = {}, {}
        # Every node feature vector is padded/truncated to this width.
        self.feature_dim = 64
        # Community scoring: node types that count as "key", and how many
        # distinct key types a community must contain.
        self.key_node_types = ['employee', 'institution', 'visit']
        self.min_key_types_in_community = 2

    def _calculate_violation_intensity(self, violation_str):
        """Map a violation-record string to a risk intensity in [0, 6].

        Returns 0 for missing records ('无', NaN, or the stringified
        'nan'/'None' produced by callers that pass ``str(x)``), a scaled
        count capped at 6.0 when the text contains '<N>次', and 3.5 for any
        other non-empty record.
        """
        # Check for missing values first: the previous order evaluated
        # `'无' in violation_str` before pd.isna, which raises TypeError on
        # a real NaN, and the str(x) wrapper used by callers turned NaN into
        # the string 'nan', which was wrongly scored 3.5.
        if pd.isna(violation_str):
            return 0
        text = str(violation_str)
        if text in ('nan', 'None') or '无' in text:
            return 0
        if '次' in text:
            try:
                count = int(text.split('次')[0].split()[-1])
                # 1.2 per occurrence, capped so one field cannot dominate.
                return min(count * 1.2, 6.0)
            except (ValueError, IndexError):
                # Unparseable count → moderate default.
                return 2.5
        # Some violation text but no parseable count.
        return 3.5

    def _parse_bad_records_enhanced(self, record_str):
        """Map a bad-record string to a risk score in [0, 5].

        Returns 0 for missing records ('无', NaN, or the stringified
        'nan'/'None' from callers that pass ``str(x)``), the count capped at
        5.0 when the text contains '<N>次', and 3.0 otherwise.
        """
        # Missing-value check must come first: `'无' in record_str` raises
        # TypeError on a real NaN before pd.isna could run, and callers'
        # str(x) turned NaN into 'nan', which was wrongly scored 3.0.
        if pd.isna(record_str):
            return 0
        text = str(record_str)
        if text in ('nan', 'None') or '无' in text:
            return 0
        if '次' in text:
            try:
                count = int(text.split('次')[0].split()[-1])
                # One point per occurrence, capped at 5.0.
                return min(count * 1.0, 5.0)
            except (ValueError, IndexError):
                # Unparseable count → moderate default.
                return 2.0
        # Some record text but no parseable count.
        return 3.0

    def _enhance_features(self, features, feature_type):
        """Expand *features* to a list of exactly ``self.feature_dim`` values.

        When the input is non-empty, three non-linear transforms of the
        leading value are appended first; the remainder is padded with
        clipped cyclic variations of the leading value (or 0.5 when the
        input is empty). Longer inputs are truncated. ``feature_type`` is
        accepted for interface compatibility but unused here.
        """
        out = list(features)
        if out:
            seed = out[0]
            # Square root, damped log, and square of the leading feature.
            out.append(seed ** 0.5)
            out.append(np.log1p(seed * 10) if seed > 0 else 0)
            out.append(seed * seed)
        if len(out) >= self.feature_dim:
            return out[:self.feature_dim]
        anchor = out[0] if out else 0.5
        # Pad with anchor * {0.7, 0.85, 1.0} cycling, clipped to [0, 1].
        for idx in range(self.feature_dim - len(out)):
            scale = 0.7 + 0.15 * (idx % 3)
            out.append(max(0, min(1, anchor * scale)))
        return out

    def _convert_date_columns(self):
        date_columns = {
            'claim_df': ['出险时间', '报案时间', '赔付时间'],
            'policy_df': ['投保时间'],
            'visit_df': ['就诊时间'],
            'employ_df': ['入职时间']
        }
        for df_name, cols in date_columns.items():
            df = getattr(self, df_name)
            for col in cols:
                if col in df.columns:
                    df[col] = pd.to_datetime(df[col], errors='coerce')

    def _create_complete_features(self):
        logger.info("创建完整特征集...")
        self.employ_df['violation_intensity'] = self.employ_df['违规记录（近3年）'].apply(
            lambda x: self._calculate_violation_intensity(str(x))
        )
        self.employ_df['position_risk'] = self.employ_df['职位'].apply(
            lambda x: 4.0 if '理赔' in str(x) else (3.0 if '审核' in str(x) else (2.0 if '管理' in str(x) else 0))
        )
        self.employ_df['department_risk'] = self.employ_df['所属部门'].apply(
            lambda x: 3.0 if '理赔' in str(x) else (2.0 if '审核' in str(x) else 0)
        )
        self.employ_df['processed_claims'] = self.employ_df['员工ID'].apply(
            lambda x: len(self.claim_df[self.claim_df['受理理赔员ID'] == x])
        )
        self.employ_df['sold_policies'] = self.employ_df['员工ID'].apply(
            lambda x: len(self.policy_df[self.policy_df['销售业务员ID'] == x])
        )
        self.employ_df['composite_risk'] = (
                self.employ_df['violation_intensity'] * 1.8 +
                self.employ_df['position_risk'] * 1.5 +
                self.employ_df['department_risk'] * 1.2 +
                np.log1p(self.employ_df['processed_claims']) * 0.8 +
                np.log1p(self.employ_df['sold_policies']) * 0.5
        )
        policy_claims = self.claim_df.groupby('关联保单号').agg({
            '赔付金额（元）': ['count', 'sum', 'mean', 'std'],
            '涉嫌欺诈（标签）': lambda x: (x == '是').sum()
        }).reset_index()
        policy_claims.columns = ['关联保单号', 'claim_count', 'claim_total', 'claim_avg', 'claim_std', 'fraud_count']
        self.policy_df = self.policy_df.merge(
            policy_claims, left_on='保单号', right_on='关联保单号', how='left'
        )
        for col in ['claim_count', 'claim_total', 'claim_avg', 'claim_std', 'fraud_count']:
            self.policy_df[col] = self.policy_df[col].fillna(0)
        self.policy_df['premium_risk'] = np.log1p(self.policy_df['保费（元）']) / np.log1p(
            self.policy_df['保费（元）'].quantile(0.95))
        self.policy_df['frequency_risk'] = np.log1p(self.policy_df['claim_count']) / np.log1p(
            self.policy_df['claim_count'].quantile(0.95))
        self.policy_df['fraud_risk'] = self.policy_df['fraud_count'] * 3.0
        self.policy_df['amount_risk'] = np.log1p(self.policy_df['claim_total']) / np.log1p(
            self.policy_df['claim_total'].quantile(0.95))
        self.policy_df['composite_risk'] = (
                self.policy_df['fraud_risk'] * 0.5 +
                self.policy_df['frequency_risk'] * 0.25 +
                self.policy_df['amount_risk'] * 0.15 +
                self.policy_df['premium_risk'] * 0.1
        )
        self.institution_df['bad_record_score'] = self.institution_df['不良记录（近3年）'].apply(
            lambda x: self._parse_bad_records_enhanced(str(x))
        )
        self.institution_df['type_risk'] = self.institution_df['机构类型'].apply(
            lambda x: 3.0 if '私立' in str(x) else (2.5 if '民营' in str(x) else 0.5)
        )
        self.institution_df['level_risk'] = self.institution_df['等级'].apply(
            lambda x: 1.0 if '一级' in str(x) else (0.7 if '二级' in str(x) else 0.3)
        )
        self.institution_df['visit_count'] = self.institution_df['医疗机构ID'].apply(
            lambda x: len(self.visit_df[self.visit_df['医疗机构ID'] == x])
        )
        self.institution_df['total_visit_cost'] = self.institution_df['医疗机构ID'].apply(
            lambda x: self.visit_df[self.visit_df['医疗机构ID'] == x]['总费用（元）'].sum()
        )
        self.institution_df['composite_risk'] = (
                self.institution_df['bad_record_score'] * 1.8 +
                self.institution_df['type_risk'] * 1.5 +
                self.institution_df['level_risk'] * 1.0 +
                np.log1p(self.institution_df['visit_count']) * 0.6 +
                np.log1p(self.institution_df['total_visit_cost']) / 10.0
        )
        self.claim_df['amount_risk'] = np.log1p(self.claim_df['赔付金额（元）']) / np.log1p(
            self.claim_df['赔付金额（元）'].quantile(0.95))
        self.claim_df['fraud_label'] = (self.claim_df['涉嫌欺诈（标签）'] == '是').astype(int)
        if '出险时间' in self.claim_df.columns and '报案时间' in self.claim_df.columns:
            self.claim_df['report_delay'] = (self.claim_df['报案时间'] - self.claim_df['出险时间']).dt.total_seconds() / 3600
            self.claim_df['delay_risk'] = self.claim_df['report_delay'].apply(
                lambda x: 2.0 if x > 72 else (1.0 if x > 24 else 0)
            )
        else:
            self.claim_df['delay_risk'] = 0
        self.claim_df['composite_risk'] = (
                self.claim_df['fraud_label'] * 4.0 +
                self.claim_df['amount_risk'] * 2.0 +
                self.claim_df['delay_risk'] * 1.0
        )
        self.visit_df['cost_risk'] = np.log1p(self.visit_df['总费用（元）']) / np.log1p(
            self.visit_df['总费用（元）'].quantile(0.95))
        self.visit_df['duration_risk'] = np.log1p(self.visit_df.get('住院天数', 0)) / np.log1p(30)
        self.visit_df['cost_per_day'] = self.visit_df['总费用（元）'] / (self.visit_df.get('住院天数', 1) + 1)
        self.visit_df['cost_anomaly'] = (self.visit_df['cost_per_day'] > self.visit_df['cost_per_day'].quantile(
            0.9)).astype(int) * 1.5
        self.visit_df['composite_risk'] = (
                self.visit_df['cost_risk'] * 2.0 +
                self.visit_df['duration_risk'] * 1.5 +
                self.visit_df['cost_anomaly'] * 1.0
        )
        logger.info("完整特征集创建完成")