import lightgbm as lgb
import numpy as np

from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
from sklearn.metrics import f1_score
from sklearn.metrics import precision_score
from sklearn.model_selection import train_test_split
from sklearn.utils.class_weight import compute_sample_weight





def f_20cm_6_13(train_df):
    """Filter the training frame down to the 20cm-limit candidate rows.

    Applies the project-level 20cm filter and return binarization, then keeps
    only rows passing the bias/raise_buy/r1/gt9_num screens.
    Relies on project helpers: filter_20cm, p_col, binarize_ret.
    """
    df = filter_20cm(train_df)
    p_col(df)
    df = binarize_ret(df)
    # Rows without r15/ret3 cannot be labeled or screened — drop them first.
    df = df.dropna(subset=['r15', 'ret3'])
    keep = (
        (df['bias15'] >= 0)
        & (df['raise_buy'] < 4)
        & (df['r1'] > 0)
        & (df['gt9_num'] <= 1)
    )
    df = df[keep]
    p_col(df)
    return df

# Preset configurations per gain range (high-recall model variants).
recall_configs = [
    
    {
        "name": "涨幅4%-9%",
        # Per-class training weights, keyed by class id 0-3; class 3 (the
        # rarest / most valuable class, presumably) is boosted hardest.
        "class_weights": {0: 1.8, 1: 0.6, 2: 2, 3: 10},
        # Multipliers applied to predicted class probabilities before argmax.
        "pred_weights": np.array([1.8, 1.6, 2, 10]),
        "save_path": "input/v3_20_high_recall_4_9.txt"
    },
]




def train_20_recall_6_13(train_df):
    """Train the high-recall 20cm model using the first recall preset."""
    cfg = recall_configs[0]
    train_model_high_reacall_20(
        train_df,
        class_weights=cfg['class_weights'],
        pred_weights=cfg['pred_weights'],
        save_model_path=cfg['save_path'],
    )
    
''' 20cm per-stock training (high-recall variant) '''
def train_model_high_reacall_20(train_df, class_weights, pred_weights, save_model_path):
    """Train a 4-class LightGBM classifier tuned for recall on 20cm stocks.

    Parameters
    ----------
    train_df : DataFrame containing the feature columns plus
        'label', 'date', 'code', 'ret3'.
    class_weights : dict mapping class id (0-3) -> training weight.
    pred_weights : array-like of 4 multipliers applied to predicted class
        probabilities before argmax at evaluation time.
    save_model_path : path where the trained booster text model is written.

    Side effects: saves the model to disk and prints evaluation metrics.
    """
    df = train_df
    # Feature matrix: everything except label / identifier / target columns.
    features = df.drop(columns=['label', 'date','code','ret3'])
    labels = df['label']
    print(f"recall220-清洗后有效样本数: {len(features)}")

    # Chronological split: shuffle=False keeps the last 20% of rows as the
    # validation set, avoiding look-ahead leakage on time-ordered data.
    X_train, X_test, y_train, y_test = train_test_split(
        features, labels, test_size=0.2, shuffle=False
    )

    print(f"类别权重: {class_weights}")
    print(f"预测权重: {pred_weights}")

    # Expand the per-class weights into one weight per training sample.
    sample_weights = compute_sample_weight(class_weight=class_weights, y=y_train)

    train_data = lgb.Dataset(X_train, label=y_train, weight=sample_weights)
    test_data = lgb.Dataset(X_test, label=y_test, reference=train_data)

    # Multiclass LightGBM configuration.
    params = {
        'objective': 'multiclass',
        'num_class': 4,             # classes 0-3
        'metric': 'multi_logloss',
        'learning_rate': 0.05,      # overridden per-iteration by reset_parameter below
        'num_leaves': 31,
        'max_depth': 8,
        'min_data_in_leaf': 50,     # overfitting guard
        # Regularization
        'reg_alpha': 0.5,           # L1
        'reg_lambda': 0.5,          # L2
        'feature_fraction': 0.9,
        'boosting_type': 'dart',
        'verbosity': -1             # suppress warnings
    }

    model = lgb.train(
        params,
        train_data,
        num_boost_round=3000,
        valid_sets=[test_data],
        callbacks=[
            # NOTE(review): LightGBM documents that dart boosting does not
            # support early stopping — confirm this callback takes effect here.
            lgb.early_stopping(stopping_rounds=150),
            lgb.log_evaluation(300),
            lgb.reset_parameter(
                learning_rate=lambda iter: 0.05 * (0.995 ** iter)  # exponential decay
            )
        ],
        feval=macro_f1_score  # monitor macro-F1 alongside multi_logloss
    )

    model.save_model(save_model_path)
    print(f"模型已保存至: {save_model_path}")

    # Weighted-argmax evaluation: scale each class's probability before
    # picking the winner, biasing predictions toward the boosted classes.
    y_pred_prob = model.predict(X_test)  # shape: [n_samples, n_classes]
    weighted_probs = y_pred_prob * pred_weights
    y_pred = weighted_probs.argmax(axis=1)

    print(f"Accuracy: {accuracy_score(y_test, y_pred):.4f}")
    print("\nClassification Report:\n", classification_report(y_test, y_pred))
    print("Confusion Matrix:\n", confusion_matrix(y_test, y_pred))





# Preset configurations for the high-precision model variants.
# NOTE(review): the name is misspelled ("presicion") but is kept as-is because
# callers in this file reference it by that spelling.
presicion_configs = [
    {
        "name": "涨幅4%-9%",
        # Per-class training weights, keyed by class id 0-3.
        "class_weights": {0:1.1, 1:1, 2:1, 3:1.2},
        # Multipliers applied to predicted class probabilities before argmax;
        # note class 3 is down-weighted here (0.9) to favor precision.
        "pred_weights": np.array([1.1,1, 1, 0.9]),
        "save_path": "input/v3_20_high_precision_4_9.txt"
    },
]

def train_20_precision_6_13(train_df):
    """Train the high-precision 20cm model using the first precision preset."""
    cfg = presicion_configs[0]
    train_model_high_precision_20(
        train_df,
        class_weights=cfg['class_weights'],
        pred_weights=cfg['pred_weights'],
        save_model_path=cfg['save_path'],
    )
    



def train_model_high_precision_20(f_l_df, class_weights, pred_weights, save_model_path):
    """Train a 4-class LightGBM classifier tuned for precision on 20cm stocks.

    Parameters
    ----------
    f_l_df : DataFrame containing the feature columns plus
        'label', 'date', 'code', 'ret3'.
    class_weights : dict mapping class id (0-3) -> training weight.
    pred_weights : array-like of 4 multipliers applied to predicted class
        probabilities before argmax at evaluation time.
    save_model_path : path where the trained booster text model is written.

    Side effects: saves the model to disk and prints evaluation metrics.
    """
    df = f_l_df
    # Feature matrix: everything except label / identifier / target columns.
    features = df.drop(columns=['label', 'date','code','ret3'])  # non-feature columns excluded
    labels = df['label']  # class label per row (weights below are keyed 0-3)
    print(f"precision20-清洗后有效样本数: {len(features)}")

    # Chronological split: shuffle=False keeps the most recent 20% as validation.
    X_train, X_test, y_train, y_test = train_test_split(
        features, labels, test_size=0.2, shuffle=False
    )
    
    # === Weight configuration passed in by the caller ===
    print(f"类别权重: {class_weights}")
    print(f"预测权重: {pred_weights}")
    
    sample_weights = compute_sample_weight(class_weight=class_weights, y=y_train)
    
    # LightGBM dataset wrappers (training set carries per-sample weights).
    train_data = lgb.Dataset(X_train, label=y_train, weight=sample_weights)
    test_data = lgb.Dataset(X_test, label=y_test, reference=train_data)
    
    # Multiclass model configuration — more conservative than the recall
    # variant (lower learning rate, larger leaves, stronger sampling limits).
    params = {
        'objective': 'multiclass',  # multiclass objective
        'num_class': 4,            # classes 0-3
        'metric': 'multi_logloss',  # multiclass log-loss for evaluation
        'learning_rate': 0.005,
        'num_leaves': 31,
        'max_depth': 8,                # nonlinear capacity
        'min_data_in_leaf': 300,         # overfitting guard
        'min_gain_to_split': 0.05,  # minimum split gain threshold

        # Regularization
        'reg_lambda': 0.5,              # L2 strength
        'feature_fraction': 0.5,        # feature subsampling ratio
        'boosting_type': 'dart',         # for generalization
        'max_cat_to_onehot': 4,  # categorical feature handling

        # NOTE(review): scale_pos_weight is a binary-classification parameter —
        # confirm it has any effect under objective='multiclass'.
        'scale_pos_weight': 0.5,
        # NOTE(review): top_rate belongs to the 'goss' boosting type — likely
        # inert under 'dart'; verify against the LightGBM version in use.
        'top_rate': 0.2,
        'path_smooth': 0.5,        # decision-path smoothing

    }
    
    model = lgb.train(
        params,
        train_data,
        num_boost_round=3000,
        valid_sets=[test_data],
        callbacks=[
            # NOTE(review): dart boosting is documented as not supporting
            # early stopping — confirm this callback takes effect.
            lgb.early_stopping(stopping_rounds=150),
            lgb.log_evaluation(300),
            lgb.reset_parameter(
                learning_rate=lambda iter: max(0.0005, 0.01*(0.999**iter))  # decay from 0.01, floored at 0.0005
            )
        ],
        feval=macro_f1_score  # monitor macro-F1 alongside multi_logloss
    )
   # === Save the model to the caller-supplied path ===
    model.save_model(save_model_path)
    print(f"模型已保存至: {save_model_path}")

    # Weighted-argmax evaluation on the held-out probability matrix.
    y_pred_prob = model.predict(X_test)  # shape: [n_samples, n_classes]
    weighted_probs = y_pred_prob * pred_weights 
    y_pred = weighted_probs.argmax(axis=1)
    
    print(f"Accuracy: {accuracy_score(y_test, y_pred):.4f}")
    print("\nClassification Report:\n", classification_report(y_test, y_pred))
    print("Confusion Matrix:\n", confusion_matrix(y_test, y_pred))



# === Custom macro-F1 eval function for lgb.train(feval=...) ===
def macro_f1_score(y_pred, dataset):
    """LightGBM custom eval: macro-averaged F1 over hard argmax predictions.

    Returns the (name, value, is_higher_better) triple LightGBM expects.
    Generalized: the class count is inferred from the label count instead of
    being hard-coded to 4, so this works for any num_class (and for preds
    that arrive either flattened or already 2-D).
    """
    y_true = dataset.get_label().astype(int)
    # NOTE(review): assumes a flattened preds array is sample-major (each
    # sample's class probabilities contiguous) — confirm for the LightGBM
    # version in use.
    y_pred = y_pred.reshape(len(y_true), -1).argmax(axis=1)
    return 'macro_f1', f1_score(y_true, y_pred, average='macro'), True
