import lightgbm as lgb
import numpy as np

from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
from sklearn.metrics import f1_score
from sklearn.metrics import precision_score
from sklearn.model_selection import train_test_split
from sklearn.utils.class_weight import compute_sample_weight


'''
    首次涨停个股
'''
def filter_10cm_zt_1(train_df):
    """Select first-time limit-up samples from the 10%-board universe.

    Applies the 10cm pre-filter, drops rows with missing return columns,
    binarizes the return labels, then keeps rows that are limit-up for the
    first time (zt_num < 2) with low pre-buy strength (raise_buy < 3).
    """
    df = filter_10cm(train_df)
    p_col(df)
    # Only drop NaNs on the return columns that actually exist in the frame.
    present = [c for c in ('r15', 'ret1', 'ret2', 'ret3', 'ret5') if c in df.columns]
    if present:
        df = df.dropna(subset=present)
    df = binarize_ret_10cm_zt(df)
    # First-time limit-up rows only, with limited buying pressure.
    keep = (df['raise_buy'] < 3) & df['is_zt'] & (df['zt_num'] < 2)
    df = df[keep]
    p_col(df)
    return df

'''
    1、涨幅大于4%小于13%，但非涨停
    2、10日内出现涨幅大于4%的次数在1到3次之间（对应代码 0 < gt4_num <= 3）
'''
def filter_10cm_gt4(train_df):
    """Select non-limit-up samples whose daily gain sits in the 3%-13% band.

    Rows must have a non-negative bias15, must not be limit-up, and must show
    between one and three >4% gains over the lookback window (gt4_num).
    """
    p_col(train_df)
    # Only drop NaNs for the feature/return columns present in the frame.
    present = [c for c in ('r15', 'bias15', 'ret1', 'ret2', 'ret3', 'ret5')
               if c in train_df.columns]
    if present:
        train_df = train_df.dropna(subset=present)
    df = binarize_ret_10cm_gt4(train_df)
    df = df[df['bias15'] >= 0]
    df = df[~df['is_zt']]
    df = df[(df['quote_rate'] >= 3) & (df['quote_rate'] <= 13)]
    df = df[(df['gt4_num'] > 0) & (df['gt4_num'] <= 3)]
    p_col(df)
    return df
    


# Per-scenario configuration for the high-recall models.
# - class_weights: per-class weights for sample weighting at train time;
#   class 3 (the strongest move) is heavily up-weighted to maximize recall.
# - pred_weights: per-class multipliers applied to predicted probabilities
#   before argmax at inference time.
# - save_path: where the trained text-format model is written.
recall_configs = [
    {
        "name": "首次涨停",
        "class_weights": {0: 1.0, 1:2, 2: 3, 3: 8},
        "pred_weights": np.array([1.0, 2, 3, 8]),
        "save_path": "input/v3_10_high_recall_zt_1.txt"
    },
    {
        "name": "涨幅大于4%",
        "class_weights": {0: 1, 1: 2, 2: 3, 3: 6},
        "pred_weights": np.array([1, 2, 3, 6]),
        "save_path": "input/v3_h_recall_gt3.txt"
    },
]


def train_10_recall_zt_1(train_df):
    """Train the high-recall model for the first-limit-up scenario."""
    cfg = recall_configs[0]
    train_model_high_recall(
        train_df,
        class_weights=cfg['class_weights'],
        pred_weights=cfg['pred_weights'],
        save_model_path=cfg['save_path'],
    )

def train_recall_zt_after(train_df):
    """Train the high-recall model for the >4%-gain (non-limit-up) scenario."""
    cfg = recall_configs[1]
    train_model_high_recall(
        train_df,
        class_weights=cfg['class_weights'],
        pred_weights=cfg['pred_weights'],
        save_model_path=cfg['save_path'],
    )


def train_model_high_recall(train_df, class_weights, pred_weights, save_model_path):
    """Train a recall-oriented 4-class LightGBM model and save it.

    Args:
        train_df: DataFrame with feature columns plus 'label', 'date', 'code'
            and ret* columns (the latter are excluded from the feature set).
        class_weights: per-class weights used to compute sample weights.
        pred_weights: per-class multipliers applied to predicted probabilities
            before taking the argmax.
        save_model_path: text-format model output path.

    Returns:
        The trained LightGBM Booster.
    """
    data = train_df
    # ret* columns look like forward returns (targets) — keep them out of the
    # features so the model cannot leak the label.
    leak_cols = [c for c in data.columns if c.startswith('ret')]
    X = data.drop(columns=['label', 'date', 'code'] + leak_cols, errors='ignore')
    y = data['label']
    print(f"recall清洗后有效样本数: {len(X)}")

    # Chronological split (shuffle=False) — avoids look-ahead bias.
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.2, shuffle=False
    )

    print(f"类别权重: {class_weights}")
    print(f"预测权重: {pred_weights}")

    # Per-sample weights derived from the per-class weights.
    weights = compute_sample_weight(class_weight=class_weights, y=y_train)

    lgb_train = lgb.Dataset(X_train, label=y_train, weight=weights)
    lgb_valid = lgb.Dataset(X_test, label=y_test, reference=lgb_train)

    params = {
        'objective': 'multiclass',
        'num_class': 4,
        'metric': 'multi_logloss',
        'learning_rate': 0.05,
        'num_leaves': 65,
        'max_depth': 10,
        'min_data_in_leaf': 50,
        'reg_alpha': 0.5,
        'reg_lambda': 0.5,
        'feature_fraction': 0.8,
        'boosting_type': 'dart',
        'verbosity': -1
    }

    booster = lgb.train(
        params,
        lgb_train,
        num_boost_round=3000,
        valid_sets=[lgb_valid],
        callbacks=[
            # lgb.early_stopping(stopping_rounds=150),
            lgb.log_evaluation(300),
            # Exponentially decaying learning rate; this overrides the
            # 'learning_rate' entry in params on every round.
            lgb.reset_parameter(
                learning_rate=lambda iter: 0.05 * (0.995 ** iter)
            )
        ],
        feval=macro_f1_score
    )

    booster.save_model(save_model_path)
    print(f"模型已保存至: {save_model_path}")

    # Re-weight class probabilities before argmax to bias the decision
    # toward the up-weighted (minority) classes.
    probs = booster.predict(X_test)
    preds = (probs * pred_weights).argmax(axis=1)

    print(f"Accuracy: {accuracy_score(y_test, preds):.4f}")
    print("\nClassification Report:\n", classification_report(y_test, preds))
    print("Confusion Matrix:\n", confusion_matrix(y_test, preds))

    return booster

    




# Per-scenario configuration for the high-precision models. These weights
# deliberately down-weight class 3 so it is only predicted when the model is
# confident, trading recall for precision.
# NOTE(review): the misspelled name "presicion" is kept for compatibility
# with existing callers.
presicion_configs = [
    {
        "name": "首次涨停",
        "class_weights": {0: 1.2, 1: 1, 2: 1, 3: 0.6},
        "pred_weights": np.array([1.2, 1, 1, 0.6]),
        "save_path": "input/v3_10_high_precision_zt_1.txt"
    },
    {
        # Fixed label: was "涨幅大于%" (threshold missing); the matching
        # recall config names this scenario "涨幅大于4%".
        "name": "涨幅大于4%",
        "class_weights": {0: 1.1, 1: 1, 2: 1, 3: 0.9},
        "pred_weights": np.array([1.1, 1, 1, 0.9]),
        "save_path": "input/v3_h_precision_gt3.txt"
    },
]
def train_10_precision_zt_1(train_df):
    """Train the high-precision model for the first-limit-up scenario."""
    cfg = presicion_configs[0]
    train_model_high_precision(
        train_df,
        class_weights=cfg['class_weights'],
        pred_weights=cfg['pred_weights'],
        save_model_path=cfg['save_path'],
    )

def train_precision_zt_after(train_df):
    """Train the high-precision model for the >4%-gain (non-limit-up) scenario."""
    cfg = presicion_configs[1]
    train_model_high_precision(
        train_df,
        class_weights=cfg['class_weights'],
        pred_weights=cfg['pred_weights'],
        save_model_path=cfg['save_path'],
    )
    


'''
    高精准度模型
'''

def train_model_high_precision(train_df, class_weights, pred_weights, save_model_path):
    """Train a precision-oriented 4-class LightGBM model and save it.

    Mirrors train_model_high_recall, but with a more conservative setup
    (smaller learning rate, stronger regularization, class weights that
    down-weight the top class) to favor precision over recall.

    Args:
        train_df: DataFrame with feature columns plus 'label', 'date', 'code'
            and ret* columns (the latter are excluded from the feature set).
        class_weights: per-class weights used to compute sample weights.
        pred_weights: per-class multipliers applied to predicted probabilities
            before taking the argmax.
        save_model_path: text-format model output path.

    Returns:
        The trained LightGBM Booster (fix: previously nothing was returned,
        unlike train_model_high_recall).
    """
    df = train_df
    # ret* columns look like forward returns (targets) — excluded from features.
    ret_columns = [col for col in df.columns if col.startswith('ret')]
    features = df.drop(columns=['label', 'date', 'code'] + ret_columns, errors='ignore')

    # NOTE(review): argmax below yields classes 0-3 and num_class=4, so the
    # label column is assumed to hold codes 0-3 (the original comment said
    # "1-4" — confirm against the labeling code).
    labels = df['label']
    print(f"precision清洗后有效样本数: {len(features)}")

    # Chronological split (shuffle=False) — avoids look-ahead bias.
    X_train, X_test, y_train, y_test = train_test_split(
        features, labels, test_size=0.2, shuffle=False
    )
    print(f"类别权重1: {class_weights}")
    print(f"预测权重: {pred_weights}")

    # Per-sample weights derived from the per-class weights.
    sample_weights = compute_sample_weight(class_weight=class_weights, y=y_train)

    train_data = lgb.Dataset(X_train, label=y_train, weight=sample_weights)
    test_data = lgb.Dataset(X_test, label=y_test, reference=train_data)

    params = {
        'objective': 'multiclass',
        'num_class': 4,
        'metric': 'multi_logloss',
        'learning_rate': 0.005,          # overridden per-round by reset_parameter below
        'num_leaves': 65,
        'max_depth': 8,
        'min_data_in_leaf': 300,         # large leaves to curb overfitting
        'min_gain_to_split': 0.05,       # minimum gain required to split

        # Regularization / generalization
        'reg_lambda': 0.5,               # L2 strength
        'feature_fraction': 0.8,         # per-tree feature subsampling
        'boosting_type': 'dart',
        'max_cat_to_onehot': 4,
        # NOTE(review): scale_pos_weight is a binary-classification parameter
        # and top_rate applies to GOSS boosting; both may be inert under
        # objective=multiclass with dart — confirm against LightGBM docs.
        'scale_pos_weight': 0.5,
        'top_rate': 0.2,
        'path_smooth': 0.5,              # leaf-value smoothing

    }

    model = lgb.train(
        params,
        train_data,
        num_boost_round=3000,
        valid_sets=[test_data],
        callbacks=[
            # lgb.early_stopping(stopping_rounds=150),
            lgb.log_evaluation(300),
            # Decaying learning rate floored at 0.0005 (the original comment
            # claimed 0.001, which did not match the code).
            lgb.reset_parameter(
                learning_rate=lambda iter: max(0.0005, 0.01*(0.999**iter))
            )
        ],
        feval=macro_f1_score  # track macro-F1 alongside multi_logloss
    )
    model.save_model(save_model_path)  # text format
    print(f"模型已保存至: {save_model_path}")

    # Probability matrix, shape [n_samples, n_classes]; re-weight before
    # argmax so the down-weighted class is only chosen with high confidence.
    y_pred_prob = model.predict(X_test)
    weighted_probs = y_pred_prob * pred_weights
    y_pred = weighted_probs.argmax(axis=1)

    print(f"Accuracy: {accuracy_score(y_test, y_pred):.4f}")
    print("\nClassification Report:\n", classification_report(y_test, y_pred))
    print("Confusion Matrix:\n", confusion_matrix(y_test, y_pred))

    # Consistency fix: return the model like train_model_high_recall does.
    return model






# === 新增代码：宏F1评估函数 ===
def macro_f1_score(y_pred, dataset):
    """Custom LightGBM eval function: macro-averaged F1 for multiclass models.

    Args:
        y_pred: flat prediction array of length n_samples * n_classes.
        dataset: lgb.Dataset providing the true labels via get_label().

    Returns:
        (metric_name, value, is_higher_better) tuple, as feval requires.
    """
    y_true = dataset.get_label().astype(int)
    # Generalization: infer the class count from the array sizes instead of
    # hard-coding 4, so the metric works for any num_class setting.
    num_class = y_pred.size // y_true.size
    y_pred = y_pred.reshape(-1, num_class).argmax(axis=1)
    return 'macro_f1', f1_score(y_true, y_pred, average='macro'), True






