import lightgbm as lgb
import numpy as np

from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
from sklearn.metrics import f1_score
from sklearn.metrics import precision_score
from sklearn.model_selection import train_test_split
from sklearn.utils.class_weight import compute_sample_weight


'''
    First-time limit-up stocks (10% board)
'''
def filter_10cm_zt_1(train_df):
    """Build the training slice for stocks hitting their first limit-up.

    Pipeline: restrict to 10%-board stocks, drop rows with missing forward
    returns, attach multi-class labels, keep only first-time limit-ups, and
    remove one-word boards (opened at the limit all day).

    Parameters:
        train_df -- raw feature DataFrame (assumed to contain the columns
                    used below; produced upstream — TODO confirm schema).

    Returns:
        Filtered, labeled DataFrame.
    """
    f_l_df = filter_10cm(train_df)
    p_col(f_l_df)
    # Forward-return columns must be non-null before labeling.
    cols_to_check = ['r15', 'ret1', 'ret2', 'ret3', 'ret5']
    valid_columns = [col for col in cols_to_check if col in f_l_df.columns]
    if valid_columns:
        f_l_df = f_l_df.dropna(subset=valid_columns)
    labelF = LabelF()
    f_l_df = labelF.binarize_ret_10cm_zt(f_l_df)
    f_l_df = f_l_df[f_l_df['raise_buy'] < 3]
    # First limit-up: at the limit today, and fewer than 2 limit-ups in 10 days.
    f_l_df = f_l_df[f_l_df['is_zt'] & (f_l_df['zt10'] < 2)]
    # BUG FIX: '&' binds tighter than '>', so the original expression computed
    # ((high == low) & quote_rate) > 9 instead of the intended mask.
    # Parenthesize the comparison so one-word boards (high == low with
    # gain > 9%) are really excluded.
    f_l_df = f_l_df[~((f_l_df['high'] == f_l_df['low']) & (f_l_df['quote_rate'] > 9))]

    p_col(f_l_df)
    return f_l_df

'''
    1. Daily gain between 4% and 13%, but not a limit-up
    2. Fewer than 3 days with gain > 4% within the last 10 days
'''
def filter_10cm_gt4(train_df):
    """Build the training slice for strong-but-not-limit-up movers.

    Keeps rows with a 3%-13% gain that did not hit the limit, with
    non-negative 15-day bias and fewer than 3 recent >4% days.

    Parameters:
        train_df -- raw feature DataFrame.

    Returns:
        Filtered, labeled DataFrame.
    """
    f_l_df = filter_10cm(train_df)
    p_col(f_l_df)
    # CONSISTENCY FIX: check the columns on the filtered frame (f_l_df), not
    # on train_df — dropna runs on f_l_df, so checking train_df could raise
    # KeyError if filter_10cm removed a column. Matches filter_10cm_zt_1.
    cols_to_check = ['r15', 'bias15', 'ret1', 'ret2', 'ret3', 'ret5']
    valid_columns = [col for col in cols_to_check if col in f_l_df.columns]
    if valid_columns:
        f_l_df = f_l_df.dropna(subset=valid_columns)
    labelF = LabelF()
    f_l_df = labelF.binarize_ret_10cm_gt4(f_l_df)

    f_l_df = f_l_df[f_l_df['raise_buy'] < 4]
    f_l_df = f_l_df[(f_l_df['bias15'] >= 0)]
    f_l_df = f_l_df[~f_l_df['is_zt']]
    f_l_df = f_l_df[(f_l_df['quote_rate'] >= 3) & (f_l_df['quote_rate'] <= 13)]
    f_l_df = f_l_df[(f_l_df['gt4'] < 3)]
    p_col(f_l_df)
    return f_l_df

def filter_10cm_outlier(train_df):
    """Build the training slice for outlier detection on 10%-board stocks.

    Same cleaning pipeline as the other filters, labeled by
    LabelF.binarize_outlier; excludes limit-up rows.

    Parameters:
        train_df -- raw feature DataFrame.

    Returns:
        Filtered, labeled DataFrame.
    """
    f_l_df = filter_10cm(train_df)
    p_col(f_l_df)
    # CONSISTENCY FIX: check columns on f_l_df (the frame dropna operates on),
    # not on train_df — matches filter_10cm_zt_1 and avoids a possible
    # KeyError if filter_10cm dropped one of these columns.
    cols_to_check = ['r15', 'bias15', 'ret1', 'ret2', 'ret3', 'ret5']
    valid_columns = [col for col in cols_to_check if col in f_l_df.columns]
    if valid_columns:
        f_l_df = f_l_df.dropna(subset=valid_columns)
    labelF = LabelF()
    f_l_df = labelF.binarize_outlier(f_l_df)

    f_l_df = f_l_df[f_l_df['raise_buy'] < 4]
    f_l_df = f_l_df[~f_l_df['is_zt']]
    p_col(f_l_df)
    return f_l_df


# Configurations for the high-recall models, one entry per gain regime.
# class_weights: per-class sample weights used at training time (class 3 —
#   presumably the big-gain class, TODO confirm — is boosted hardest).
# pred_weights: multiplied into the predicted probability matrix at inference
#   to bias argmax toward the rare classes.
# save_path: where the trained LightGBM model text file is written.
recall_configs = [
    {
        "name": "首次涨停",
        "class_weights": {0: 1.0, 1:2, 2: 3, 3: 8},
        "pred_weights": np.array([1.0, 2, 3, 8]),
        "save_path": "input/v4_10_high_recall_zt_1.txt"
    },
    {
        "name": "涨幅大于4%",
        "class_weights": {0: 1, 1: 2, 2: 3, 3: 20},
        "pred_weights": np.array([1, 2, 3, 20]),
        "save_path": "input/v4_h_recall_gt3.txt"
    },
]


def train_10_recall_zt_1(train_df):
    """Train the high-recall model using the first-limit-up config (index 0)."""
    cfg = recall_configs[0]
    train_model_high_recall(
        train_df,
        class_weights=cfg['class_weights'],
        pred_weights=cfg['pred_weights'],
        save_model_path=cfg['save_path'],
    )

def train_recall_zt_after(train_df):
    """Train the high-recall model using the >4%-gain config (index 1)."""
    cfg = recall_configs[1]
    train_model_high_recall(
        train_df,
        class_weights=cfg['class_weights'],
        pred_weights=cfg['pred_weights'],
        save_model_path=cfg['save_path'],
    )


def train_model_high_recall(train_df, class_weights, pred_weights, save_model_path):
    """Train a 4-class LightGBM model biased toward recall of rare classes.

    Parameters:
        train_df        -- DataFrame with features plus 'label' and forward
                           'ret*' columns (dropped from the feature set).
        class_weights   -- dict mapping class -> training sample weight.
        pred_weights    -- array multiplied into predicted probabilities
                           before argmax to favor rare classes.
        save_model_path -- path for the saved model (text format).

    Returns:
        The trained lightgbm Booster.
    """
    frame = train_df
    # Features = everything except the label, identifiers, and forward returns.
    ret_cols = [c for c in frame.columns if c.startswith('ret')]
    features = frame.drop(columns=['label', 'date', 'code'] + ret_cols, errors='ignore')
    labels = frame['label']
    print(f"recall清洗后有效样本数: {len(features)}")

    # Chronological split: shuffle=False keeps later rows in the test set.
    X_train, X_test, y_train, y_test = train_test_split(
        features, labels, test_size=0.2, shuffle=False
    )

    print(f"类别权重: {class_weights}")
    print(f"预测权重: {pred_weights}")

    # Per-sample weights derived from the class-weight config.
    sample_weights = compute_sample_weight(class_weight=class_weights, y=y_train)

    train_set = lgb.Dataset(X_train, label=y_train, weight=sample_weights)
    valid_set = lgb.Dataset(X_test, label=y_test, reference=train_set)

    params = {
        'objective': 'multiclass',
        'num_class': 4,
        'metric': 'multi_logloss',
        'learning_rate': 0.05,
        'num_leaves': 65,
        'max_depth': 10,
        'min_data_in_leaf': 50,
        'reg_alpha': 0.5,
        'reg_lambda': 0.5,
        'feature_fraction': 0.8,
        'boosting_type': 'dart',
        'verbosity': -1
    }

    model = lgb.train(
        params,
        train_set,
        num_boost_round=3000,
        valid_sets=[valid_set],
        callbacks=[
            # lgb.early_stopping(stopping_rounds=150),
            lgb.log_evaluation(300),
            # Exponentially decaying learning-rate schedule.
            lgb.reset_parameter(
                learning_rate=lambda i: 0.05 * (0.995 ** i)
            ),
        ],
        feval=macro_f1_score,
    )

    model.save_model(save_model_path)
    print(f"模型已保存至: {save_model_path}")

    # Evaluate with probability re-weighting before argmax.
    proba = model.predict(X_test)
    boosted = proba * pred_weights
    y_pred = boosted.argmax(axis=1)

    print(f"Accuracy: {accuracy_score(y_test, y_pred):.4f}")
    print("\nClassification Report:\n", classification_report(y_test, y_pred))
    print("Confusion Matrix:\n", confusion_matrix(y_test, y_pred))

    return model

    




# Configurations for the high-precision models (counterpart of recall_configs).
# Here class 3 is DOWN-weighted so the model only predicts it when confident.
# NOTE: the name "presicion_configs" is a typo for "precision_configs", kept
# as-is because it is referenced by the trainer functions below.
presicion_configs = [
    {
        "name": "首次涨停",
        "class_weights": {0:1.2, 1:1, 2:1, 3:0.6},
        "pred_weights": np.array([1.2, 1, 1, 0.6]),
        "save_path": "input/v4_10_high_precision_zt_1.txt"
    },
    {
        "name": "涨幅大于%",
        "class_weights": {0:0.9, 1:1, 2:1, 3:0.9},
        "pred_weights": np.array([0.9,1, 1, 0.9]),
        "save_path": "input/v4_h_precision_gt3.txt"
    },
]
def train_10_precision_zt_1(train_df):
    """Train the high-precision model using the first-limit-up config (index 0)."""
    cfg = presicion_configs[0]
    train_model_high_precision(
        train_df,
        class_weights=cfg['class_weights'],
        pred_weights=cfg['pred_weights'],
        save_model_path=cfg['save_path'],
    )

def train_precision_zt_after(train_df):
    """Train the high-precision model using the >4%-gain config (index 1)."""
    cfg = presicion_configs[1]
    train_model_high_precision(
        train_df,
        class_weights=cfg['class_weights'],
        pred_weights=cfg['pred_weights'],
        save_model_path=cfg['save_path'],
    )
    


'''
    High-precision model
'''

def train_model_high_precision(train_df, class_weights, pred_weights, save_model_path):
    """Train a 4-class LightGBM model tuned for precision.

    Heavier regularization and a smaller learning rate than the recall
    model; class weights de-emphasize the rare class so it is only
    predicted when confident.

    Parameters:
        train_df        -- DataFrame with features plus 'label' and forward
                           'ret*' columns (dropped from the feature set).
        class_weights   -- dict mapping class -> training sample weight.
        pred_weights    -- array multiplied into predicted probabilities
                           before argmax.
        save_model_path -- path for the saved model (text format).

    Returns:
        The trained lightgbm Booster (added for consistency with
        train_model_high_recall; callers that ignore it are unaffected).
    """
    df = train_df
    # Features = everything except the label, identifiers, and forward returns.
    ret_columns = [col for col in df.columns if col.startswith('ret')]
    features = df.drop(columns=['label', 'date', 'code'] + ret_columns, errors='ignore')

    labels = df['label']  # presumably integer classes 0-3 (num_class=4) — TODO confirm
    print(f"precision清洗后有效样本数: {len(features)}")

    # Chronological split: shuffle=False keeps later rows in the test set.
    X_train, X_test, y_train, y_test = train_test_split(
        features, labels, test_size=0.2, shuffle=False
    )
    print(f"类别权重1: {class_weights}")
    print(f"预测权重: {pred_weights}")

    # Per-sample weights derived from the class-weight config.
    sample_weights = compute_sample_weight(class_weight=class_weights, y=y_train)

    train_data = lgb.Dataset(X_train, label=y_train, weight=sample_weights)
    test_data = lgb.Dataset(X_test, label=y_test, reference=train_data)

    # Multiclass parameters with strong regularization for precision.
    params = {
        'objective': 'multiclass',
        'num_class': 4,
        'metric': 'multi_logloss',
        'learning_rate': 0.005,     # overridden by the reset_parameter schedule below
        'num_leaves': 65,
        'max_depth': 8,
        'min_data_in_leaf': 300,    # large leaves to damp overfitting
        'min_gain_to_split': 0.05,  # minimum split gain threshold

        # Regularization / generalization knobs
        'reg_lambda': 0.5,          # L2 strength
        'feature_fraction': 0.8,    # feature subsampling
        'boosting_type': 'dart',
        'max_cat_to_onehot': 4,
        'scale_pos_weight': 0.5,    # NOTE(review): binary-task parameter; likely
                                    # ignored under multiclass — confirm intent
        'top_rate': 0.2,            # NOTE(review): GOSS parameter; likely ignored
                                    # under 'dart' — confirm intent
        'path_smooth': 0.5,
    }

    model = lgb.train(
        params,
        train_data,
        num_boost_round=3000,
        valid_sets=[test_data],
        callbacks=[
            # lgb.early_stopping(stopping_rounds=150),
            lgb.log_evaluation(300),
            # Exponential decay with a floor of 0.0005 (the original comment
            # claimed 0.001, which did not match the code).
            lgb.reset_parameter(
                learning_rate=lambda i: max(0.0005, 0.01 * (0.999 ** i))
            )
        ],
        feval=macro_f1_score  # track macro-F1 alongside logloss
    )
    model.save_model(save_model_path)  # text format
    print(f"模型已保存至: {save_model_path}")

    # Evaluate with probability re-weighting before argmax.
    y_pred_prob = model.predict(X_test)  # shape: [n_samples, n_classes]
    weighted_probs = y_pred_prob * pred_weights
    y_pred = weighted_probs.argmax(axis=1)

    print(f"Accuracy: {accuracy_score(y_test, y_pred):.4f}")
    print("\nClassification Report:\n", classification_report(y_test, y_pred))
    print("Confusion Matrix:\n", confusion_matrix(y_test, y_pred))

    # Consistency fix: return the trained model like train_model_high_recall.
    return model






# === 新增代码：宏F1评估函数 ===
# Custom LightGBM feval: macro-averaged F1 over the 4 classes.
def macro_f1_score(y_pred, dataset):
    """Return ('macro_f1', score, True) — higher is better.

    NOTE: assumes y_pred flattens to row-major [n_samples, 4]; LightGBM's
    prediction layout for multiclass feval varies by version — confirm.
    """
    true_labels = dataset.get_label().astype(int)
    predicted = y_pred.reshape(-1, 4).argmax(axis=1)
    score = f1_score(true_labels, predicted, average='macro')
    return 'macro_f1', score, True



''' 异常值检测 '''
import numpy as np
import pandas as pd
from sklearn.ensemble import IsolationForest
from lightgbm import LGBMClassifier
from sklearn.metrics import classification_report, confusion_matrix, precision_score, recall_score, f1_score
from imblearn.over_sampling import SMOTE
from sklearn.model_selection import train_test_split

def train_anomaly_detection_for_class3(train_df, target_class=3):
    """
    Anomaly-detection modeling for the extremely imbalanced class 3.

    Combines an IsolationForest (trained only on class-3 samples) with a
    weighted LightGBM binary classifier; a sample is predicted class 3 only
    when both models agree.

    Parameters:
        train_df     -- DataFrame with features and a 'label' column
        target_class -- the rare class to detect (default 3)

    Returns:
        dict with 'anomaly_model', 'main_model', 'combined_predict' and
        'best_threshold'.
    """
    df = train_df
    df = df.dropna()

    # Features = everything except the label, identifiers, and forward returns.
    ret_columns = [col for col in df.columns if col.startswith('ret')]
    features = df.drop(columns=['label', 'date', 'code'] + ret_columns, errors='ignore')

    labels = df['label']
    print(f"precision清洗后有效样本数: {len(features)}")

    X = features
    y = labels

    # Binary target: 1 for the rare class, 0 for everything else.
    is_target = (y == target_class).astype(int)

    # 2. Stratified split preserves the rare-class ratio in the test set.
    X_train, X_test, y_train, y_test = train_test_split(
        X, is_target, test_size=0.2, stratify=is_target, random_state=42
    )

    # 3. Oversample the rare class with SMOTE.
    print("\n过采样处理...")
    print(f"原始类别分布: {pd.Series(y_train).value_counts()}")

    # NOTE(review): hard-coded 10000 synthetic positives — fails if the
    # training split already has >= 10000 positives; confirm data scale.
    smote = SMOTE(sampling_strategy={1: 10000}, k_neighbors=5, random_state=42)
    X_res, y_res = smote.fit_resample(X_train, y_train)

    print(f"过采样后类别分布: {pd.Series(y_res).value_counts()}")

    # 4. Isolation Forest trained only on (oversampled) rare-class samples,
    #    so its decision function scores "how class-3-like" a row is.
    print("\n训练异常检测模型...")
    target_ratio = y_train.mean()  # rare-class share used as contamination

    anomaly_model = IsolationForest(
        n_estimators=300,
        contamination=target_ratio,
        max_samples=256,
        random_state=42,
        verbose=0
    )
    anomaly_model.fit(X_res[y_res == 1])

    # 5. Main binary classifier with class weights (neg/pos ratio).
    print("\n训练主分类模型...")
    class_weight = {
        0: 1,
        1: len(y_res[y_res == 0]) / len(y_res[y_res == 1])
    }

    main_model = LGBMClassifier(
        n_estimators=1000,
        learning_rate=0.05,
        num_leaves=63,
        max_depth=8,
        min_data_in_leaf=100,
        class_weight=class_weight,
        verbosity=-1,
        random_state=42
    )
    main_model.fit(X_res, y_res)

    # 6. Consensus prediction: both models must flag the sample.
    def combined_predict(X, threshold=0.5):
        # Probability of the rare class from the main model.
        main_proba = main_model.predict_proba(X)[:, 1]

        # Anomaly score from the Isolation Forest.
        anomaly_scores = anomaly_model.decision_function(X)

        # NOTE(review): min-max normalization over the CURRENT batch makes
        # predictions depend on batch composition — consider fixing the
        # scale from training-time scores instead.
        min_score = np.min(anomaly_scores)
        max_score = np.max(anomaly_scores)
        norm_anomaly_scores = (anomaly_scores - min_score) / (max_score - min_score)

        # Flag only when both conditions hold.
        target_mask = (main_proba > threshold) & (norm_anomaly_scores > 0.5)

        predictions = np.zeros(len(X))
        predictions[target_mask] = 1  # 1 == target (rare) class
        return predictions

    # 7. Evaluation happens below via threshold optimization.
    #    (Removed a dead `combined_predict(X_test)` call whose result was
    #    never used — it cost a full prediction pass for nothing.)
    print("\n评估模型...")

    # 8. Threshold search maximizing F1.
    # NOTE(review): the threshold is tuned on the TEST set, which leaks test
    # information into model selection — a held-out validation split would be
    # methodologically cleaner.
    def optimize_threshold(X_val, y_val):
        """Scan probability thresholds and return the one with the best F1."""
        print("优化概率阈值...")
        thresholds = np.linspace(0.1, 0.9, 20)
        f1_scores = []

        for thresh in thresholds:
            preds = combined_predict(X_val, threshold=thresh)
            # Guard: F1 is 0 when only one class is ever predicted.
            f1 = f1_score(y_val, preds) if len(np.unique(preds)) > 1 else 0
            f1_scores.append(f1)

        best_thresh = thresholds[np.argmax(f1_scores)]
        print(f"最佳概率阈值: {best_thresh:.4f} (F1={max(f1_scores):.4f})")
        return best_thresh

    best_threshold = optimize_threshold(X_test, y_test)

    # 9. Final predictions with the tuned threshold.
    final_pred = combined_predict(X_test, threshold=best_threshold)

    # 10. Report metrics for the rare class.
    print("\n" + "="*50)
    print(f"类别 {target_class} 检测结果")
    print("="*50)
    print(f"精准率: {precision_score(y_test, final_pred):.4f}")
    print(f"召回率: {recall_score(y_test, final_pred):.4f}")
    print(f"F1分数: {f1_score(y_test, final_pred):.4f}")

    print("\n类别3混淆矩阵:")
    print(confusion_matrix(y_test, final_pred))

    print("\n完整分类报告:")
    print(classification_report(y_test, final_pred, target_names=['其他', f'类别{target_class}']))

    return {
        'anomaly_model': anomaly_model,
        'main_model': main_model,
        'combined_predict': combined_predict,
        'best_threshold': best_threshold
    }

# 使用示例 ===============================================================

# # 假设已有数据
# # X, y = 加载数据...

# # 训练模型系统
# model_system = train_anomaly_detection_for_class3(X, y, target_class=3)

# # 预测新数据
# new_data = ... # 新数据
# predictions = model_system['combined_predict'](new_data)

# # 获取类别3的预测结果
# class3_predictions = (predictions == 1)