import json
import numpy as np
import pandas as pd
from typing import Dict, List, Tuple, Any
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.metrics import classification_report
import joblib

class MLLevelMatcher:
    """Machine-learning based job-level matcher.

    Trains a RandomForest classifier on scored candidate features and
    predicts a job level, with rule-based short-circuits for the extreme
    levels (P4 floor / P9 ceiling) applied before the model runs.
    """

    def __init__(self, data_file: str = None):
        """Initialize the matcher.

        Args:
            data_file: Optional path to a JSON file containing salary data.
                Stored for callers; this class does not read it directly.
        """
        self.data_file = data_file
        self.model = None          # fitted sklearn Pipeline (preprocessor + classifier)
        self.preprocessor = None   # ColumnTransformer built by _create_preprocessor
        self.feature_names = None  # reserved; not populated here
        self.label_encoder = None  # reserved; not populated here

        # Numeric scoring features (standard-scaled by the preprocessor).
        self.numeric_features = [
            'experience_score',        # experience score
            'education_score',         # education score
            'technical_score',         # technical score
            'workload_score',          # workload score
            'compensation_score',      # compensation score
            'responsibility_score'     # responsibility score
        ]

        # Categorical features (one-hot encoded by the preprocessor).
        self.categorical_features = [
            'location_tier',           # city tier
            'tech_level',              # technical level
            'role_type',               # role type
            'business_domain'          # business domain
        ]

    def _create_preprocessor(self):
        """Build the ColumnTransformer: scale numerics, one-hot categoricals.

        handle_unknown='ignore' lets prediction-time categories unseen in
        training encode as all-zeros instead of raising.
        """
        numeric_transformer = Pipeline(steps=[
            ('scaler', StandardScaler())
        ])

        categorical_transformer = Pipeline(steps=[
            ('onehot', OneHotEncoder(handle_unknown='ignore'))
        ])

        self.preprocessor = ColumnTransformer(
            transformers=[
                ('num', numeric_transformer, self.numeric_features),
                ('cat', categorical_transformer, self.categorical_features)
            ])

    def train(self, training_data: List[Dict[str, Any]], verbose: bool = True,
              min_samples: int = 10):
        """Train the level classifier.

        Args:
            training_data: List of feature dicts; each must contain the
                numeric/categorical features plus a 'target_level' label.
            verbose: Print data distributions, CV scores, an evaluation
                report and (when plotting libs exist) a confusion matrix.
            min_samples: Levels with fewer rows than this are dropped
                before fitting (default 10, the original hard-coded cutoff).
        """
        df = pd.DataFrame(training_data)

        if verbose:
            print("原始数据分布:")
            print(df['target_level'].value_counts())

        # Drop levels too rare to learn from (and to stratify on).
        level_counts = df['target_level'].value_counts()
        valid_levels = level_counts[level_counts >= min_samples].index
        df = df[df['target_level'].isin(valid_levels)]

        if verbose:
            print("\n过滤后的数据分布:")
            print(df['target_level'].value_counts())

        # Feature matrix X and target y.
        X = df[self.numeric_features + self.categorical_features]
        y = df['target_level']

        # Stratified split keeps per-level proportions in both partitions.
        X_train, X_test, y_train, y_test = train_test_split(
            X, y, test_size=0.2, random_state=42, stratify=y
        )

        if verbose:
            print("\n训练数据大小:", len(X_train))
            print("测试数据大小:", len(X_test))
            print("\n各评分特征的相关性:")
            print(df[self.numeric_features].corr())

        self._create_preprocessor()

        # Preprocessing + classifier in one pipeline so predict() accepts
        # raw feature frames.
        self.model = Pipeline([
            ('preprocessor', self.preprocessor),
            ('classifier', RandomForestClassifier(
                n_estimators=200,
                max_depth=None,
                min_samples_split=5,
                min_samples_leaf=2,
                class_weight='balanced',  # offsets residual class imbalance
                random_state=42
            ))
        ])

        self.model.fit(X_train, y_train)

        if verbose:
            self._report_evaluation(df, X_train, y_train, X_test, y_test)

    def _report_evaluation(self, df, X_train, y_train, X_test, y_test):
        """Print CV scores, classification report, feature importances and,
        when seaborn/matplotlib are installed, a confusion-matrix heatmap.
        """
        cv_scores = cross_val_score(self.model, X_train, y_train, cv=5)
        print("\n交叉验证分数:", cv_scores)
        print("平均分数: %0.2f (+/- %0.2f)" % (cv_scores.mean(), cv_scores.std() * 2))

        y_pred = self.model.predict(X_test)
        print("\n模型评估报告：")
        print(classification_report(y_test, y_pred))

        importance = self.get_feature_importance()
        print("\n特征重要性:")
        for feature, score in sorted(importance.items(), key=lambda x: x[1], reverse=True):
            print(f"{feature}: {score:.4f}")

        # Plotting is best-effort: a missing optional dependency must not
        # abort an otherwise successful training run.
        try:
            import seaborn as sns
            import matplotlib.pyplot as plt
        except ImportError:
            return
        from sklearn.metrics import confusion_matrix

        # Pass the same labels to confusion_matrix and to the tick labels
        # so the axes are guaranteed to line up with the matrix rows.
        labels = sorted(df['target_level'].unique())
        cm = confusion_matrix(y_test, y_pred, labels=labels)
        plt.figure(figsize=(10, 8))
        sns.heatmap(cm, annot=True, fmt='d', cmap='Blues',
                    xticklabels=labels, yticklabels=labels)
        plt.title('Confusion Matrix')
        plt.ylabel('True Label')
        plt.xlabel('Predicted Label')
        plt.show()

    def predict_level(self, user_features: Dict[str, Any]) -> Tuple[str, float, Dict[str, float]]:
        """Predict the matching job level for a user.

        Args:
            user_features: User feature dict. Missing numeric features
                default to 0; missing categorical features to 'unknown'.

        Returns:
            Tuple of (predicted level, confidence, per-level probability
            dict). Returns a sentinel message with zero confidence when
            no model has been trained.
        """
        if self.model is None:
            return "模型未训练", 0.0, {}

        user_df = pd.DataFrame([user_features])

        # Backfill any missing model features with neutral defaults.
        for feature in self.numeric_features:
            if feature not in user_df.columns:
                user_df[feature] = 0

        for feature in self.categorical_features:
            if feature not in user_df.columns:
                user_df[feature] = 'unknown'

        # Aggregate score used only by the rule-based floor/ceiling checks.
        # NOTE(review): technical_score and workload_score are excluded
        # from this sum — presumably intentional; confirm with the owner.
        total_score = (
            user_features.get('experience_score', 0) +
            user_features.get('education_score', 0) +
            user_features.get('compensation_score', 0) +
            user_features.get('responsibility_score', 0)
        )

        # Rule-based short-circuits take precedence over the model.
        p4_result, is_p4 = self._check_p4_conditions(user_features, total_score)
        if is_p4:
            return p4_result

        p9_result, is_p9 = self._check_p9_conditions(user_features, total_score)
        if is_p9:
            return p9_result

        # Normal model prediction.
        predicted_level = self.model.predict(user_df)[0]

        probabilities = self.model.predict_proba(user_df)[0]
        confidence = float(max(probabilities))

        # Map every class label to its predicted probability.
        class_probs = dict(zip(
            self.model.classes_,
            probabilities
        ))

        return predicted_level, confidence, class_probs

    def _check_p4_conditions(self, user_features: Dict[str, Any], total_score: float) -> Tuple[Tuple[str, float, Dict[str, float]], bool]:
        """Check whether the user should be classified directly as P4.

        Counts how many low-end indicators hold; at least 4 of the 5
        must be met for the P4 short-circuit to fire.

        Returns:
            ((level, confidence, probabilities), matched) — the first
            element is a placeholder when matched is False.
        """
        conditions_met = 0

        if total_score < 120:
            conditions_met += 1

        if user_features.get('experience_score', 0) < 20:
            conditions_met += 1

        if user_features.get('compensation_score', 0) < 20:
            conditions_met += 1

        if user_features.get('responsibility_score', 0) <= 45:
            conditions_met += 1

        if user_features.get('tech_level') == '初级':
            conditions_met += 1

        # At least 4 of 5 conditions required to force P4.
        if conditions_met >= 4:
            return ('P4', 0.85, {
                'P4': 0.85, 'P5': 0.10, 'P6': 0.03, 'P7': 0.01, 'P8': 0.01, 'P9': 0.00
            }), True

        return ('', 0.0, {}), False

    def _check_p9_conditions(self, user_features: Dict[str, Any], total_score: float) -> Tuple[Tuple[str, float, Dict[str, float]], bool]:
        """Check whether the user should be classified directly as P9.

        Counts how many high-end indicators hold; all 5 must be met for
        the P9 short-circuit to fire (stricter than the P4 check).

        Returns:
            ((level, confidence, probabilities), matched) — the first
            element is a placeholder when matched is False.
        """
        conditions_met = 0

        if total_score > 290:
            conditions_met += 1

        if user_features.get('experience_score', 0) > 85:
            conditions_met += 1

        if user_features.get('compensation_score', 0) > 60:
            conditions_met += 1

        if user_features.get('responsibility_score', 0) > 70:
            conditions_met += 1

        if (user_features.get('tech_level') == '专家' or
            user_features.get('role_type') == '管理'):
            conditions_met += 1

        # All 5 conditions required to force P9.
        if conditions_met >= 5:
            return ('P9', 0.88, {
                'P9': 0.88, 'P8': 0.10, 'P7': 0.02, 'P6': 0.00, 'P5': 0.00, 'P4': 0.00
            }), True

        return ('', 0.0, {}), False

    def get_feature_importance(self) -> Dict[str, float]:
        """Return a mapping of feature name -> RandomForest importance.

        Returns an empty dict when the model is untrained or the
        classifier exposes no importances. One-hot-expanded categorical
        column names come from the fitted encoder.
        """
        if self.model is None or not hasattr(self.model.named_steps['classifier'], 'feature_importances_'):
            return {}

        importances = self.model.named_steps['classifier'].feature_importances_

        # Expanded column names in the order the classifier saw them:
        # numerics first, then one-hot categoricals (matches transformer order).
        numeric_features = self.numeric_features
        categorical_features = (
            self.model.named_steps['preprocessor']
            .named_transformers_['cat']
            .get_feature_names_out(self.categorical_features)
        )

        feature_names = numeric_features + categorical_features.tolist()

        return dict(zip(feature_names, importances))

    def save_model(self, file_path: str):
        """Persist this matcher (model included) to file via joblib.

        Args:
            file_path: Destination path for the serialized matcher.

        Raises:
            ValueError: If no model has been trained yet.
        """
        if self.model is None:
            raise ValueError("模型未训练，无法保存")

        joblib.dump(self, file_path)

    @classmethod
    def load_model(cls, file_path: str) -> 'MLLevelMatcher':
        """Load a previously saved matcher from file.

        SECURITY: joblib deserialization is pickle-based — loading an
        untrusted file can execute arbitrary code. Only load files you
        produced with save_model.

        Args:
            file_path: Path to the serialized matcher.

        Returns:
            MLLevelMatcher: The deserialized matcher instance.
        """
        return joblib.load(file_path)