# model_trainor.py
import numpy as np
import joblib
from sklearn.svm import SVC, LinearSVC
from sklearn.linear_model import LogisticRegression
from xgboost import XGBClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
from sklearn.model_selection import train_test_split, StratifiedKFold, GridSearchCV
from imblearn.over_sampling import SMOTE, RandomOverSampler
from imblearn.pipeline import Pipeline as imPipeline
from asm_feature_extractor import extract_features
import warnings
from collections import Counter
from sklearn.base import BaseEstimator

# Suppress specific warning categories that would otherwise clutter training output
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore", category=UserWarning)

class SafeSampler(BaseEstimator):
    """Oversampler that adapts its strategy to the minority-class size.

    At ``fit_resample`` time, picks SMOTE with a feasible ``k_neighbors``
    when the smallest class has more than two samples, and falls back to
    ``RandomOverSampler`` otherwise (SMOTE needs at least ``k + 1``
    minority samples to synthesize neighbors). Pipeline/cross-validation
    friendly: resampling happens only on the training folds.
    """

    def __init__(self, random_state=42, max_k_neighbors=3):
        self.random_state = random_state
        self.max_k_neighbors = max_k_neighbors
        self._sampler = None

    def fit_resample(self, X, y):
        """Resample (X, y) with whichever oversampler is feasible for y."""
        class_sizes = Counter(y)
        smallest = min(class_sizes.values())
        if smallest <= 2:
            # Too few minority samples for SMOTE neighbors -> plain duplication.
            chosen = RandomOverSampler(random_state=self.random_state)
        else:
            # k must stay below the minority-class count for SMOTE to work.
            neighbors = min(self.max_k_neighbors, max(1, smallest - 1))
            chosen = SMOTE(random_state=self.random_state, k_neighbors=neighbors)
        self._sampler = chosen
        return chosen.fit_resample(X, y)


# prepare_data no longer applies SMOTE to the full dataset; it only returns raw features and labels
def prepare_data(model_path, asm_dir, label_file, check_file):
    """Load features and labels for training, without any resampling.

    Class imbalance is handled later inside the training pipeline; this
    function only extracts cached features and shifts labels to 0-based.

    Returns:
        Tuple (X, y) of feature matrix and 0-based integer labels.
    """
    cache = extract_features(model_path, asm_dir, label_file, check_file)
    X = cache['features']
    y = cache['targets'] - 1  # labels arrive 1-based; shift to 0-based

    # Report the raw class distribution for inspection.
    labels, sizes = np.unique(y, return_counts=True)
    print("\n原始类别分布:")
    for lbl, size in zip(labels, sizes):
        print(f"  类别 {lbl}: {size} 个样本")

    return X, y

def get_model_by_name(model_name):
    """Return a fresh, unfitted classifier for the given name.

    Estimators are built lazily so only the requested one is constructed.
    Returns None when ``model_name`` is not a supported key.
    """
    factories = {
        'SVM': lambda: SVC(probability=True, random_state=42),
        'LinearSVM': lambda: LinearSVC(random_state=42, dual=False),
        'LogisticRegression': lambda: LogisticRegression(max_iter=1000, random_state=42),
        'XGBoost': lambda: XGBClassifier(random_state=42, use_label_encoder=False, eval_metric='mlogloss'),
        'MLP': lambda: MLPClassifier(random_state=42, max_iter=1000),
    }
    factory = factories.get(model_name)
    return factory() if factory is not None else None

def get_param_grid_by_name(model_name):
    """Return the GridSearchCV parameter grid for the named model.

    Keys are prefixed with ``clf__`` so they target the classifier step
    of the training pipeline. Returns None for unknown names.
    """
    if model_name == 'SVM':
        return {
            'clf__C': [0.1, 1, 10],
            'clf__gamma': ['scale', 'auto'],
            'clf__class_weight': [None, 'balanced'],
        }
    if model_name == 'LinearSVM':
        return {
            'clf__C': [0.1, 1, 10],
            'clf__penalty': ['l1', 'l2'],
            'clf__class_weight': [None, 'balanced'],
        }
    if model_name == 'LogisticRegression':
        # saga is the only listed solver supporting both l1 and l2 here.
        return {
            'clf__C': [0.1, 1, 10],
            'clf__penalty': ['l1', 'l2'],
            'clf__solver': ['saga'],
            'clf__class_weight': [None, 'balanced'],
        }
    if model_name == 'XGBoost':
        return {
            'clf__n_estimators': [100, 200],
            'clf__max_depth': [3, 6],
            'clf__learning_rate': [0.05, 0.1],
            'clf__subsample': [0.8],
            'clf__colsample_bytree': [0.8],
            'clf__gamma': [0],
            'clf__reg_alpha': [0],
            'clf__reg_lambda': [0.5, 1],
        }
    if model_name == 'MLP':
        return {
            'clf__hidden_layer_sizes': [(50,), (100,), (50, 50)],
            'clf__activation': ['relu', 'tanh'],
            'clf__alpha': [0.0001, 0.001],
            'clf__learning_rate_init': [0.001, 0.01],
        }
    return None

def train_and_evaluate_model(model_path, asm_dir, label_file, check_file, custom_file, selected_model=None):
    """Train, tune and evaluate classifiers, then persist the best pipeline.

    The sampler lives inside an imblearn pipeline so oversampling happens
    only on training folds (no leakage into validation/test data).

    Args:
        model_path, asm_dir, label_file, check_file: forwarded to
            prepare_data / extract_features.
        custom_file: path where the final fitted pipeline is dumped (joblib).
        selected_model: optional model name ('SVM', 'LinearSVM',
            'LogisticRegression', 'XGBoost', 'MLP'). Trains all when None.

    Returns:
        The final pipeline, refit on the full dataset.

    Raises:
        ValueError: if selected_model is not a recognized model name.
    """
    # Prepare raw (un-resampled) data.
    X, y = prepare_data(model_path, asm_dir, label_file, check_file)

    # Stratified hold-out split; resampling is applied later, inside the
    # pipeline, on training folds only.
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.2, stratify=y, random_state=42
    )

    print(f"\n训练样本: {X_train.shape[0]}, 测试样本: {X_test.shape[0]}")

    # Build model configs from the shared helpers instead of duplicating the
    # estimator/grid definitions here (keeps both code paths in sync).
    if selected_model:
        model_names = [selected_model]
    else:
        model_names = ['SVM', 'LinearSVM', 'LogisticRegression', 'XGBoost', 'MLP']

    models = {}
    for name in model_names:
        estimator = get_model_by_name(name)
        param_grid = get_param_grid_by_name(name)
        if estimator is None or param_grid is None:
            # Fail fast with a clear message instead of a cryptic error later
            # when the pipeline receives clf=None.
            raise ValueError(f"未知模型名称: {name}")
        models[name] = {'estimator': estimator, 'param_grid': param_grid}

    cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
    best_model = None
    best_score = -1
    results = {}

    # Pipeline layout: StandardScaler -> SafeSampler -> classifier.
    print("\n开始模型训练和调优...")
    for name, cfg in models.items():
        print(f"\n=== 训练 {name} ===")

        # GridSearch only tunes 'clf__*' parameters; scaler/sampler are fixed.
        pipeline = imPipeline([
            ('scaler', StandardScaler()),
            ('sampler', SafeSampler(random_state=42, max_k_neighbors=3)),
            ('clf', cfg['estimator'])
        ])

        # f1_macro reflects imbalanced-class performance better than accuracy.
        gs = GridSearchCV(
            estimator=pipeline,
            param_grid=cfg['param_grid'],
            cv=cv,
            scoring='f1_macro',
            n_jobs=-1,
            verbose=1
        )

        gs.fit(X_train, y_train)

        # Evaluate on the untouched (never-resampled) test split.
        y_pred = gs.predict(X_test)
        acc = accuracy_score(y_test, y_pred)
        report = classification_report(y_test, y_pred)
        cm = confusion_matrix(y_test, y_pred)

        results[name] = {
            'best_params': gs.best_params_,
            'best_cv_score': gs.best_score_,
            'test_accuracy': acc,
            'classification_report': report,
            'confusion_matrix': cm,
            'best_estimator': gs.best_estimator_
        }

        print(f"{name} 最佳参数: {gs.best_params_}")
        print(f"{name} CV (f1_macro): {gs.best_score_:.4f}")
        print(f"{name} 测试集准确率: {acc:.4f}")
        print(f"\n分类报告:\n{report}")
        print(f"混淆矩阵:\n{cm}")

        # NOTE(review): selecting by test-set accuracy lets the test set
        # influence model choice; gs.best_score_ (CV f1_macro) would avoid
        # that leakage. Kept as-is to preserve existing behavior.
        if acc > best_score:
            best_score = acc
            best_model = gs.best_estimator_
            print(f"🔥 当前最佳模型: {name} (准确率: {acc:.4f})")

    if best_model is None:
        # Extreme fallback (should not normally happen).
        print("\n⚠️ 没有找到合适模型，使用默认 SVM（带采样器）")
        best_model = imPipeline([
            ('scaler', StandardScaler()),
            ('sampler', SafeSampler(random_state=42)),
            ('clf', SVC(probability=True, random_state=42, class_weight='balanced'))
        ])
        best_model.fit(X_train, y_train)

    # Refit the winning pipeline on the full dataset (SafeSampler.fit_resample
    # runs inside fit, so resampling still applies only to the fit data).
    print("\n在全量数据上重训练最终模型（pipeline 内会进行必要采样）...")
    final_pipeline = best_model
    final_pipeline.fit(X, y)

    # Persist the fitted pipeline.
    joblib.dump(final_pipeline, custom_file)
    print(f"\n最终模型已保存至: {custom_file}")
    return final_pipeline
