import os
import warnings
from datetime import datetime, timedelta
from math import sqrt

import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import tushare as ts
from mpl_toolkits.axes_grid1.inset_locator import inset_axes, mark_inset
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.metrics import (mean_squared_error, mean_absolute_error,
                             r2_score, accuracy_score, precision_score,
                             recall_score, f1_score)
from sklearn.preprocessing import MinMaxScaler
from sklearn.svm import SVR
from tensorflow import reshape, transpose, matmul, math, cast, float32
from tensorflow.keras import backend as K
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.initializers import RandomNormal
from tensorflow.keras.layers import (Conv1D, LSTM, Dense, Dropout,
                                     Bidirectional, GRU, Input,
                                     Multiply, Permute, RepeatVector,
                                     Flatten, concatenate, LayerNormalization,
                                     MultiHeadAttention, Layer, Softmax)
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.optimizers import Adam

# Configure matplotlib so CJK labels render (Microsoft YaHei) and the minus
# sign still displays correctly with a non-ASCII font.
plt.rcParams['font.sans-serif'] = ['Microsoft YaHei']
plt.rcParams['axes.unicode_minus'] = False

# Suppress library warnings (sklearn/keras emit many during repeated fits).
warnings.filterwarnings('ignore')

# --------- 1. TuShare initialization ---------
# SECURITY NOTE(review): the API token is hard-coded in source. Move it to an
# environment variable / config file before sharing or publishing this script.
ts.set_token('c29dde5abfe811a80f85ae7db0f5107baff4fc382842708b63b30817')
pro = ts.pro_api()

# --------- 2. Run parameters ---------
bank_stocks = {
    '招商银行': '600036.SH',  # retail-banking representative
    '中国银行': '601988.SH'  # state-owned big-bank representative
}
today = datetime.today()
end_date = today.strftime('%Y%m%d')
start_date = (today - timedelta(days=365 * 3)).strftime('%Y%m%d')  # roughly 3 years of history
n_days = 10  # look-back window: previous 10 days predict the next day
n_features = 7  # number of engineered input features
epochs = 100
batch_size = 32
validation_split = 0.1
train_test_split = 0.8
# fraction of samples used for training (chronological split)
critical_period_start = "2024-01-01"  # start of the "critical turning point" window
critical_period_end = "2024-06-30"  # end of the "critical turning point" window


# --------- 3. 模型定义 ---------
class Time2Vector(Layer):
    """Time2Vec-style temporal embedding layer.

    For each time step, the mean of the input features is mapped to one
    learned linear feature and one learned periodic (sine) feature.
    Output shape: (batch, seq_len, 2).
    """

    def __init__(self, seq_len, **kwargs):
        # **kwargs forwards standard Layer options (name, dtype, ...) and is
        # required for clean get_config round-trips.
        super(Time2Vector, self).__init__(**kwargs)
        self.seq_len = seq_len

    def build(self, input_shape):
        # One weight/bias pair per time step for each of the two branches.
        self.weights_linear = self.add_weight(name='weight_linear',
                                              shape=(int(self.seq_len),),
                                              initializer=RandomNormal(mean=0.0, stddev=0.05))
        self.bias_linear = self.add_weight(name='bias_linear',
                                           shape=(int(self.seq_len),),
                                           initializer=RandomNormal(mean=0.0, stddev=0.05))
        self.weights_periodic = self.add_weight(name='weight_periodic',
                                                shape=(int(self.seq_len),),
                                                initializer=RandomNormal(mean=0.0, stddev=0.05))
        self.bias_periodic = self.add_weight(name='bias_periodic',
                                             shape=(int(self.seq_len),),
                                             initializer=RandomNormal(mean=0.0, stddev=0.05))
        # Mark the layer as built (the original never called super().build).
        super(Time2Vector, self).build(input_shape)

    def call(self, x):
        # Collapse the feature axis: each time step becomes a single scalar.
        x = math.reduce_mean(x, axis=-1)

        time_linear = self.weights_linear * x + self.bias_linear
        time_linear = K.expand_dims(time_linear, axis=-1)

        time_periodic = math.sin(self.weights_periodic * x + self.bias_periodic)
        time_periodic = K.expand_dims(time_periodic, axis=-1)

        return concatenate([time_linear, time_periodic], axis=-1)

    def get_config(self):
        # Fix: without get_config, models containing this custom layer cannot
        # be saved and re-loaded.
        config = super(Time2Vector, self).get_config()
        config.update({'seq_len': self.seq_len})
        return config


class TransformerEncoder(Layer):
    """Single Transformer encoder block.

    Multi-head self-attention followed by a position-wise feed-forward
    network, each wrapped with dropout and a residual + LayerNormalization
    (post-norm arrangement). Output shape equals input shape.
    """

    def __init__(self, d_k, d_v, n_heads, ff_dim, dropout=0.1, **kwargs):
        # **kwargs forwards standard Layer options and enables get_config
        # round-trips.
        super(TransformerEncoder, self).__init__(**kwargs)
        self.d_k = d_k
        self.d_v = d_v
        self.n_heads = n_heads
        self.ff_dim = ff_dim
        self.dropout_rate = dropout

    def build(self, input_shape):
        self.attention = MultiHeadAttention(
            num_heads=self.n_heads,
            key_dim=self.d_k,
            value_dim=self.d_v,
            dropout=self.dropout_rate
        )
        self.attention_dropout = Dropout(self.dropout_rate)
        self.attention_norm = LayerNormalization(epsilon=1e-6)

        # The FFN projects up to ff_dim and back to the input width so the
        # residual addition in call() stays shape-compatible.
        self.ffn = Sequential([
            Dense(self.ff_dim, activation='relu'),
            Dense(input_shape[-1])
        ])
        self.ffn_dropout = Dropout(self.dropout_rate)
        self.ffn_norm = LayerNormalization(epsilon=1e-6)
        # Mark the layer as built (the original never called super().build).
        super(TransformerEncoder, self).build(input_shape)

    def call(self, inputs):
        # Self-attention sub-layer with residual connection.
        attention_output = self.attention(inputs, inputs)
        attention_output = self.attention_dropout(attention_output)
        attention_output = self.attention_norm(inputs + attention_output)

        # Feed-forward sub-layer with residual connection.
        ffn_output = self.ffn(attention_output)
        ffn_output = self.ffn_dropout(ffn_output)
        output = self.ffn_norm(attention_output + ffn_output)

        return output

    def get_config(self):
        # Fix: enables saving/loading models containing this custom layer.
        config = super(TransformerEncoder, self).get_config()
        config.update({
            'd_k': self.d_k,
            'd_v': self.d_v,
            'n_heads': self.n_heads,
            'ff_dim': self.ff_dim,
            'dropout': self.dropout_rate,
        })
        return config


def build_transformer_model(input_shape):
    """Transformer regressor: Time2Vector embedding + one encoder block."""
    seq_in = Input(shape=input_shape)

    # Append learned linear + periodic time encodings as extra channels.
    t2v = Time2Vector(input_shape[0])(seq_in)
    enriched = concatenate([seq_in, t2v], axis=-1)

    # Single encoder block followed by light regularization.
    encoded = TransformerEncoder(d_k=64, d_v=64, n_heads=4, ff_dim=128)(enriched)
    encoded = Dropout(0.1)(encoded)

    # Flatten the whole sequence into the scalar regression head.
    flat = Flatten()(encoded)
    prediction = Dense(1)(flat)

    net = Model(inputs=seq_in, outputs=prediction)
    net.compile(optimizer=Adam(learning_rate=0.001), loss='mse')
    return net


def build_cnn_lstm_attention_model(input_shape):
    """Build the CNN-LSTM-Attention hybrid model.

    Args:
        input_shape: (time_steps, n_features) of each training sample.

    Returns:
        Compiled Keras Model mapping a feature sequence to one scalar.
    """
    inputs = Input(shape=input_shape)
    cnn = Conv1D(filters=64, kernel_size=3, activation='relu', padding='same')(inputs)
    cnn = Dropout(0.2)(cnn)
    lstm = LSTM(64, return_sequences=True)(cnn)
    # Attention fix: softmax must normalize ACROSS TIME (axis=1). The original
    # Dense(1, activation='softmax') applied softmax over a single unit, which
    # is identically 1.0 and made the attention a no-op.
    scores = Dense(64, activation='tanh')(lstm)
    scores = Dense(1)(scores)              # (batch, time, 1) attention logits
    weights = Softmax(axis=1)(scores)      # normalized weights over time steps
    context = Multiply()([lstm, weights])  # weights broadcast over features
    context = Flatten()(context)
    outputs = Dense(1)(context)
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(optimizer=Adam(learning_rate=0.001), loss='mse')
    return model


def build_bilstm_attention_model(input_shape):
    """Build the bidirectional-LSTM + temporal attention model.

    Args:
        input_shape: (time_steps, n_features) of each training sample.

    Returns:
        Compiled Keras Model mapping a feature sequence to one scalar.
    """
    inputs = Input(shape=input_shape)
    bilstm = Bidirectional(LSTM(64, return_sequences=True))(inputs)
    bilstm = Dropout(0.2)(bilstm)
    # Attention fix: softmax must normalize ACROSS TIME (axis=1). The original
    # Dense(1, activation='softmax') applied softmax over a single unit, which
    # is identically 1.0 and made the attention a no-op.
    scores = Dense(64, activation='tanh')(bilstm)
    scores = Dense(1)(scores)                # (batch, time, 1) attention logits
    weights = Softmax(axis=1)(scores)        # normalized weights over time steps
    context = Multiply()([bilstm, weights])  # weights broadcast over features
    context = Flatten()(context)
    outputs = Dense(1)(context)
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(optimizer=Adam(learning_rate=0.001), loss='mse')
    return model


def build_bigru_attention_model(input_shape):
    """Build the bidirectional-GRU + temporal attention model.

    Args:
        input_shape: (time_steps, n_features) of each training sample.

    Returns:
        Compiled Keras Model mapping a feature sequence to one scalar.
    """
    inputs = Input(shape=input_shape)
    bigru = Bidirectional(GRU(64, return_sequences=True))(inputs)
    bigru = Dropout(0.2)(bigru)
    # Attention fix: softmax must normalize ACROSS TIME (axis=1). The original
    # Dense(1, activation='softmax') applied softmax over a single unit, which
    # is identically 1.0 and made the attention a no-op.
    scores = Dense(64, activation='tanh')(bigru)
    scores = Dense(1)(scores)               # (batch, time, 1) attention logits
    weights = Softmax(axis=1)(scores)       # normalized weights over time steps
    context = Multiply()([bigru, weights])  # weights broadcast over features
    context = Flatten()(context)
    outputs = Dense(1)(context)
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(optimizer=Adam(learning_rate=0.001), loss='mse')
    return model


def build_lstm_attention_model(input_shape):
    """Build the LSTM + temporal attention model (ablation: no CNN front-end).

    Args:
        input_shape: (time_steps, n_features) of each training sample.

    Returns:
        Compiled Keras Model mapping a feature sequence to one scalar.
    """
    inputs = Input(shape=input_shape)
    lstm = LSTM(64, return_sequences=True)(inputs)
    # Attention fix: softmax must normalize ACROSS TIME (axis=1). The original
    # Dense(1, activation='softmax') applied softmax over a single unit, which
    # is identically 1.0 and made the attention a no-op.
    scores = Dense(64, activation='tanh')(lstm)
    scores = Dense(1)(scores)              # (batch, time, 1) attention logits
    weights = Softmax(axis=1)(scores)      # normalized weights over time steps
    context = Multiply()([lstm, weights])  # weights broadcast over features
    context = Flatten()(context)
    outputs = Dense(1)(context)
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(optimizer=Adam(learning_rate=0.001), loss='mse')
    return model


def build_cnn_lstm_model(input_shape):
    """CNN + LSTM baseline without any attention mechanism."""
    seq_in = Input(shape=input_shape)
    conv_out = Conv1D(filters=64, kernel_size=3, padding='same', activation='relu')(seq_in)
    conv_out = Dropout(0.2)(conv_out)
    # Final LSTM state only (return_sequences defaults to False).
    summary = LSTM(64)(conv_out)
    prediction = Dense(1)(summary)
    net = Model(inputs=seq_in, outputs=prediction)
    net.compile(optimizer=Adam(learning_rate=0.001), loss='mse')
    return net


def build_lstm_only_model(input_shape):
    """Single-LSTM baseline: one recurrent layer straight into the head."""
    seq_in = Input(shape=input_shape)
    encoded = LSTM(64)(seq_in)
    prediction = Dense(1)(encoded)
    net = Model(inputs=seq_in, outputs=prediction)
    net.compile(optimizer=Adam(learning_rate=0.001), loss='mse')
    return net


def build_cnn_only_model(input_shape):
    """Pure-convolution baseline: Conv1D features flattened into the head."""
    seq_in = Input(shape=input_shape)
    features = Conv1D(filters=64, kernel_size=3, padding='same', activation='relu')(seq_in)
    flat = Flatten()(features)
    prediction = Dense(1)(flat)
    net = Model(inputs=seq_in, outputs=prediction)
    net.compile(optimizer=Adam(learning_rate=0.001), loss='mse')
    return net


def build_svm_model():
    """RBF-kernel support-vector regressor (classical baseline)."""
    regressor = SVR(kernel='rbf', C=1.0, epsilon=0.1)
    return regressor


def build_gbdt_model():
    """Gradient-boosted tree regressor (classical baseline)."""
    regressor = GradientBoostingRegressor(n_estimators=100,
                                          learning_rate=0.1,
                                          max_depth=3,
                                          random_state=42)
    return regressor

# --------- 4. 数据处理函数（修复版） ---------
def prepare_stock_data(stock_code, data_dir):
    """Fetch daily bars, engineer features, and build sliding-window datasets.

    Downloads via the module-level TuShare client, archives the raw CSV,
    derives pct_chg / turnover_rate, min-max scales everything, and returns a
    dict with train/test tensors, aligned dates, flattened inputs for the
    classical ML models, the fitted scaler, and the working DataFrame.
    """
    print(f"\n正在获取 {stock_code} 数据...")
    df = pro.daily(ts_code=stock_code, start_date=start_date, end_date=end_date)
    df = df.sort_values('trade_date')

    # Archive the untouched download for reproducibility.
    os.makedirs(f'{data_dir}/raw_data', exist_ok=True)
    raw_data_path = f'{data_dir}/raw_data/{stock_code}_raw_data.csv'
    df.to_csv(raw_data_path, index=False, encoding='utf_8_sig')
    print(f"原始数据已保存至: {raw_data_path}")

    # Derived indicators: daily percent change and a 20-day volume ratio.
    df['pct_chg'] = df['close'].pct_change() * 100
    df['turnover_rate'] = df['vol'] / df['vol'].rolling(20).mean()

    feature_cols = ['open', 'high', 'low', 'close', 'vol', 'pct_chg', 'turnover_rate']
    df = df[['trade_date'] + feature_cols].dropna().reset_index(drop=True)
    df['trade_date'] = pd.to_datetime(df['trade_date'], format='%Y%m%d')

    # NOTE(review): the scaler is fitted on the full series (train + test),
    # which mildly leaks test information — kept as-is to preserve behavior.
    scaler = MinMaxScaler()
    scaled = scaler.fit_transform(df[feature_cols])

    # Sliding windows: each sample is the previous n_days rows; the target is
    # the scaled close of the following day.
    close_idx = feature_cols.index('close')
    X = np.array([scaled[i - n_days:i, :] for i in range(n_days, len(scaled))])
    y = np.array([scaled[i, close_idx] for i in range(n_days, len(scaled))])

    # Chronological train/test split.
    split = int(len(X) * train_test_split)
    X_train, X_test = X[:split], X[split:]
    y_train, y_test = y[:split], y[split:]

    # Dates aligned with the targets (one per window).
    dates = df['trade_date'].values[n_days:]
    dates_train, dates_test = dates[:split], dates[split:]

    # Classical ML models only see the most recent day of each window.
    X_train_ml = X_train[:, -1, :]
    X_test_ml = X_test[:, -1, :]

    return {
        'X_train': X_train,
        'X_test': X_test,
        'y_train': y_train,
        'y_test': y_test,
        'dates_train': dates_train,
        'dates_test': dates_test,
        'X_train_ml': X_train_ml,
        'X_test_ml': X_test_ml,
        'scaler': scaler,
        'feature_cols': feature_cols,
        'df': df
    }

def prepare_ablation_data(stock_code, features, data_dir):
    """Build sliding-window datasets restricted to a given feature subset.

    Same pipeline as prepare_stock_data, but only the columns listed in
    `features` are scaled and windowed; used by the feature-ablation study.
    `features` must contain 'close' (the prediction target).
    """
    print(f"\n正在准备消融实验数据: {stock_code} - {features}")
    df = pro.daily(ts_code=stock_code, start_date=start_date, end_date=end_date)
    df = df.sort_values('trade_date')

    # Archive the untouched download, tagged with the feature subset.
    os.makedirs(f'{data_dir}/raw_data', exist_ok=True)
    feature_str = '_'.join(features)
    raw_data_path = f'{data_dir}/raw_data/{stock_code}_{feature_str}_raw_data.csv'
    df.to_csv(raw_data_path, index=False, encoding='utf_8_sig')
    print(f"原始数据已保存至: {raw_data_path}")

    # Derived indicators (computed even when the subset excludes them; the
    # projection below drops anything not requested).
    df['pct_chg'] = df['close'].pct_change() * 100
    df['turnover_rate'] = df['vol'] / df['vol'].rolling(20).mean()

    df = df[['trade_date'] + features].dropna().reset_index(drop=True)
    df['trade_date'] = pd.to_datetime(df['trade_date'], format='%Y%m%d')

    # NOTE(review): scaler fit on the full series (train + test) — mild
    # leakage, kept to preserve behavior.
    scaler = MinMaxScaler()
    scaled = scaler.fit_transform(df[features])

    # Sliding windows over the subset; target is the next day's scaled close.
    close_idx = features.index('close')
    X = np.array([scaled[i - n_days:i, :] for i in range(n_days, len(scaled))])
    y = np.array([scaled[i, close_idx] for i in range(n_days, len(scaled))])

    # Chronological train/test split.
    split = int(len(X) * train_test_split)
    X_train, X_test = X[:split], X[split:]
    y_train, y_test = y[:split], y[split:]

    # Dates aligned with the targets.
    dates = df['trade_date'].values[n_days:]
    dates_train, dates_test = dates[:split], dates[split:]

    return {
        'X_train': X_train,
        'X_test': X_test,
        'y_train': y_train,
        'y_test': y_test,
        'dates_train': dates_train,
        'dates_test': dates_test,
        'scaler': scaler,
        'feature_cols': features,
        'df': df
    }

# --------- 5. 评估函数 ---------
def evaluate_model(y_true, y_pred, model_name, stock_name):
    """Score predictions against ground truth.

    Returns a (dict, single-row DataFrame) pair holding regression metrics on
    price level plus classification metrics on the day-over-day direction.
    Direction metrics are None when fewer than two points are supplied.
    """
    # Regression metrics on price level.
    mse = mean_squared_error(y_true, y_pred)
    record = {
        '股票名称': stock_name,
        '模型名称': model_name,
        'MSE': mse,
        'RMSE': sqrt(mse),
        'MAE': mean_absolute_error(y_true, y_pred),
        'R方': r2_score(y_true, y_pred),
        # NOTE(review): corrcoef yields NaN for constant series — presumably
        # never hit with real prices; verify if inputs can be constant.
        '相关系数': np.corrcoef(y_true, y_pred)[0, 1],
        '平均误差': np.mean(np.abs(y_true - y_pred)),
    }

    # Direction (up/down) metrics need at least two points to diff.
    if len(y_true) > 1:
        true_dir = np.where(np.diff(y_true) > 0, 1, 0)
        pred_dir = np.where(np.diff(y_pred) > 0, 1, 0)
        k = min(len(true_dir), len(pred_dir))
        record['Accuracy'] = accuracy_score(true_dir[:k], pred_dir[:k])
        record['Precision'] = precision_score(true_dir[:k], pred_dir[:k], zero_division=0)
        record['Recall'] = recall_score(true_dir[:k], pred_dir[:k], zero_division=0)
        record['F1'] = f1_score(true_dir[:k], pred_dir[:k], zero_division=0)
    else:
        record['Accuracy'] = record['Precision'] = record['Recall'] = record['F1'] = None

    return record, pd.DataFrame([record])

def normalize_metrics(all_metrics_df):
    """Rescale error metrics so that larger is always better on radar charts.

    'Higher is better' columns (R方, Accuracy, Precision, Recall, F1,
    相关系数) are left untouched; 'lower is better' error columns are mapped
    through 1 / (1 + x), which is monotone decreasing and lands in (0, 1].

    Note: mutates (and returns) the passed DataFrame — this matches the
    original in-place contract; callers pass a .copy() when they need to keep
    the original values. (The original also declared an unused
    `higher_better` list, removed here.)
    """
    lower_better = ['MSE', 'RMSE', 'MAE', '平均误差']
    # Vectorized over all error columns at once instead of a Python loop.
    all_metrics_df[lower_better] = 1.0 / (1.0 + all_metrics_df[lower_better])
    return all_metrics_df

# --------- 6. 可视化函数（整合所有功能） ---------
def plot_model_prediction(stock_name, model_name, dates_train, y_train_pred,
                          dates_test, y_test_pred, y_train_true, y_test_true, test_mae, filename):
    """Plot true vs. predicted close prices for one stock/model pair.

    Enhancements over a plain line chart:
    1. A translucent ±1 MAE confidence band around the test predictions.
    2. An inset magnifier over the critical 2024 Q1–Q2 turning-point window.

    Args:
        stock_name / model_name: labels used in the title and legend.
        dates_train, dates_test: datetime64 arrays aligned with the series.
        y_*_pred / y_*_true: de-normalized prices for the train/test segments.
        test_mae: test-set MAE on price scale; half-width of the band.
        filename: output PNG path (saved at 300 dpi; figure closed after).
    """
    fig, ax = plt.subplots(figsize=(14, 7))

    # Stitch train + test back into one continuous series for plotting.
    all_dates = np.concatenate([dates_train, dates_test])
    all_true = np.concatenate([y_train_true, y_test_true])
    all_pred = np.concatenate([y_train_pred, y_test_pred])

    # Month-granular tick labels.
    date_fmt = mdates.DateFormatter('%Y-%m')

    # Ground truth over the whole period.
    ax.plot(all_dates, all_true, 'b-', label='真实价格', linewidth=2, alpha=0.8)

    # Predictions are only drawn over the test period.
    test_mask = (all_dates >= dates_test[0]) if len(dates_test) > 0 else np.zeros(len(all_dates), dtype=bool)

    if any(test_mask):
        ax.plot(all_dates[test_mask], all_pred[test_mask], 'r--',
                label=f'{model_name}预测', linewidth=1.5)

        # Translucent ±1 MAE confidence band around the prediction.
        if test_mae > 0:
            ax.fill_between(all_dates[test_mask],
                            all_pred[test_mask] - test_mae,
                            all_pred[test_mask] + test_mae,
                            color='red', alpha=0.15,
                            label='±1 MAE置信区间')

    # Vertical marker where the test period begins.
    if len(dates_test) > 0:
        split_date = dates_test[0]
        ax.axvline(x=split_date, color='gray', linestyle=':', linewidth=1.2)
        ax.text(split_date, ax.get_ylim()[0] + (ax.get_ylim()[1] - ax.get_ylim()[0]) * 0.02,
                ' 测试集开始', color='gray', fontsize=10, rotation=90)

    # Quarterly tick spacing on the date axis.
    ax.xaxis.set_major_formatter(date_fmt)
    ax.xaxis.set_major_locator(mdates.MonthLocator(interval=3))
    plt.xticks(rotation=45)

    # Titles, axis labels, grid, legend.
    ax.set_title(f'{stock_name} - {model_name}预测结果', fontsize=16)
    ax.set_xlabel('日期', fontsize=12)
    ax.set_ylabel('收盘价 (元)', fontsize=12)
    ax.grid(True, alpha=0.3)
    ax.legend(loc='upper left', fontsize=10)

    # Inset magnifier over the critical turning-point window (2024 Q1–Q2).
    try:
        critical_start = pd.Timestamp(critical_period_start)
        critical_end = pd.Timestamp(critical_period_end)
        critical_mask = (all_dates >= critical_start) & (all_dates <= critical_end)

        # Need at least two points inside the window to draw anything useful.
        if critical_mask.sum() < 2:
            print(f"警告: {critical_period_start}到{critical_period_end}时间段内数据不足，跳过放大镜")
        else:
            # Inset axes anchored to the lower-left of the main axes.
            axins = inset_axes(ax, width="35%", height="35%", loc='lower left',
                               bbox_to_anchor=(0.02, 0.06, 0.98, 0.98),
                               bbox_transform=ax.transAxes)

            # Re-draw both series restricted to the window.
            axins.plot(all_dates[critical_mask], all_true[critical_mask], 'b-', linewidth=1.5)
            axins.plot(all_dates[critical_mask], all_pred[critical_mask], 'r--', linewidth=1.2)

            # Mirror the confidence band inside the inset.
            if test_mae > 0 and any(test_mask):
                axins.fill_between(all_dates[critical_mask],
                                   all_pred[critical_mask] - test_mae,
                                   all_pred[critical_mask] + test_mae,
                                   color='red', alpha=0.15)

            # Inset formatting: monthly ticks, small rotated labels.
            axins.set_title('2024Q1-Q2 关键转折点', fontsize=9)
            axins.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m'))
            axins.xaxis.set_major_locator(mdates.MonthLocator())
            axins.grid(True, alpha=0.3)
            plt.setp(axins.get_xticklabels(), rotation=45, fontsize=8)

            # Outline the zoomed region on the main axes.
            mark_inset(ax, axins, loc1=2, loc2=4, fc="none", ec="0.5", linestyle="--", alpha=0.7)

    except Exception as e:
        # Best-effort: a failed inset must never abort the main figure.
        print(f"添加放大镜失败: {str(e)}")

    plt.tight_layout()
    plt.savefig(filename, dpi=300, bbox_inches='tight')
    plt.close()

def plot_radar_chart(all_metrics_df, filename, normalized=False):
    """Radar chart of every (stock, model) pair across all ten metrics.

    Args:
        all_metrics_df: one row per (stock, model) with the metric columns.
        filename: output PNG path (saved at 300 dpi; figure closed after).
        normalized: only changes the title suffix; pass a frame already put
            through normalize_metrics for the normalized view.
    """
    # All metrics shown on the chart (mixed higher/lower-is-better scales).
    categories = ['R方', 'Accuracy', 'Precision', 'Recall', 'F1',
                  'MSE', 'RMSE', 'MAE', '平均误差', '相关系数']
    N = len(categories)

    # One spoke per metric; repeat the first angle to close the polygon.
    angles = [n / float(N) * 2 * np.pi for n in range(N)]
    angles += angles[:1]

    plt.figure(figsize=(12, 12))
    ax = plt.subplot(111, polar=True)

    # Start at 12 o'clock and draw clockwise.
    ax.set_theta_offset(np.pi / 2)
    ax.set_theta_direction(-1)

    # A distinct color per (stock, model) trace: stocks × 10 models.
    color_palette = sns.color_palette("husl", len(bank_stocks) * 10)
    color_idx = 0

    for stock_name in bank_stocks.keys():
        for model_name in ['CNN-LSTM-Attention', 'BiLSTM-Attention',
                           'BiGRU-Attention', 'Transformer',
                           'LSTM-Attention', 'CNN-LSTM',
                           'LSTM-Only', 'CNN-Only',
                           'SVM', 'GBDT']:
            # Row(s) for this stock/model combination, if present.
            subset = all_metrics_df[(all_metrics_df['股票名称'] == stock_name) &
                                    (all_metrics_df['模型名称'] == model_name)]
            if len(subset) > 0:
                # Close the polygon by repeating the first value.
                values = subset[categories].values.flatten().tolist()
                values += values[:1]

                ax.plot(angles, values, linewidth=1.8, linestyle='solid',
                        label=f"{stock_name}-{model_name}",
                        color=color_palette[color_idx])

                # Translucent fill keeps overlapping traces readable.
                ax.fill(angles, values, alpha=0.15, color=color_palette[color_idx])
                color_idx += 1

    # Spoke labels.
    ax.set_xticks(angles[:-1])
    ax.set_xticklabels(categories, fontsize=10)

    # Title and legend (legend placed outside the polar axes).
    title = '银行股模型性能综合雷达图' + (' (标准化后)' if normalized else '')
    plt.title(title, size=18, y=1.05)
    plt.legend(loc='upper right', bbox_to_anchor=(1.3, 1.1), fontsize=8)

    plt.tight_layout()
    plt.savefig(filename, dpi=300, bbox_inches='tight')
    plt.close()

def plot_feature_ablation_radar(feature_ablation_df, model_name, filename):
    """Radar chart of feature-ablation results for one model.

    The full-feature run is drawn as a blue baseline; each reduced feature
    set is overlaid, with green/red shading where it beats/trails baseline.

    Args:
        feature_ablation_df: rows named '<model_name>_<feature_set>'.
        model_name: the model whose ablation runs are plotted.
        filename: output PNG path.
    """
    categories = ['R方', 'Accuracy', 'Precision', 'Recall', 'F1']
    N = len(categories)
    angles = [n / float(N) * 2 * np.pi for n in range(N)]
    angles += angles[:1]  # close the polygon

    # Baseline group = the run that used every feature.
    baseline_df = feature_ablation_df[feature_ablation_df['模型名称'].str.endswith('all_features')]

    fig, ax = plt.subplots(figsize=(10, 10), subplot_kw=dict(polar=True))

    # Polar setup: start at 12 o'clock, draw clockwise.
    ax.set_theta_offset(np.pi / 2)
    ax.set_theta_direction(-1)
    ax.set_xticks(angles[:-1])
    ax.set_xticklabels(categories, fontsize=10)

    if not baseline_df.empty:
        baseline_values = baseline_df[categories].values.flatten().tolist()
        baseline_values += baseline_values[:1]
        ax.plot(angles, baseline_values, 'b-', linewidth=2, label='完整特征')
        ax.fill(angles, baseline_values, color='blue', alpha=0.1)

    # Fix: plt.cm.get_cmap(name, lut) was deprecated in Matplotlib 3.7 and
    # removed in 3.9; plt.get_cmap is the supported equivalent.
    color_palette = plt.get_cmap('tab10', len(feature_ablation_df))

    for idx, row in enumerate(feature_ablation_df.itertuples()):
        # Fix: split('_')[-1] truncated multi-word set names ('all_features'
        # -> 'features'), so the baseline was never skipped and legend labels
        # were wrong. Strip the '<model_name>_' prefix instead.
        feature_type = row.模型名称[len(model_name) + 1:]
        if feature_type == 'all_features':
            continue  # baseline already drawn above

        values = [getattr(row, c) for c in categories]
        values += values[:1]

        ax.plot(angles, values, 'o-', linewidth=1.5, color=color_palette(idx),
                markersize=4, label=feature_type)

        # Shade where this combination beats (green) or trails (red) baseline.
        if not baseline_df.empty:
            ax.fill_between(angles, baseline_values, values,
                            where=[v > b for v, b in zip(values, baseline_values)],
                            facecolor='green', alpha=0.1, interpolate=True)
            ax.fill_between(angles, baseline_values, values,
                            where=[v < b for v, b in zip(values, baseline_values)],
                            facecolor='red', alpha=0.1, interpolate=True)

    ax.set_title(f'{model_name}特征消融实验', fontsize=16)
    ax.legend(loc='upper right', bbox_to_anchor=(1.3, 1.1), fontsize=9)

    plt.tight_layout()
    plt.savefig(filename, dpi=300, bbox_inches='tight')
    plt.close()

def plot_comparison(all_metrics_df, metric, filename):
    """Bar chart comparing a single metric across models, grouped by stock."""
    plt.figure(figsize=(12, 6))
    sns.barplot(data=all_metrics_df, x='模型名称', y=metric, hue='股票名称')
    plt.title(f'银行股模型对比 - {metric}', fontsize=14)
    plt.xticks(rotation=45)
    plt.tight_layout()
    plt.savefig(filename, dpi=300)
    plt.close()

def plot_model_ablation(all_metrics_df, filename):
    """Plot the model-component ablation results.

    One sub-plot per metric (R方, MAE, Accuracy), bars grouped by stock,
    restricted to the five ablation variants of the hybrid model.
    """
    plt.figure(figsize=(15, 8))

    # Only compare the ablation variants of the hybrid model.
    ablation_models = ['CNN-LSTM-Attention', 'LSTM-Attention',
                       'CNN-LSTM', 'LSTM-Only', 'CNN-Only']
    df = all_metrics_df[all_metrics_df['模型名称'].isin(ablation_models)]

    # Note: the original also declared an unused `colors` list, removed here.
    metrics = ['R方', 'MAE', 'Accuracy']

    # One sub-plot per metric.
    for i, metric in enumerate(metrics):
        plt.subplot(1, 3, i + 1)
        sns.barplot(x='模型名称', y=metric, hue='股票名称', data=df, palette='Set2')
        plt.title(f'{metric}对比', fontsize=12)
        plt.xticks(rotation=45)
        plt.xlabel('')
        if metric == 'R方':
            plt.ylim(0, 1)  # clamp axis; R² outside [0, 1] is uninformative here

    plt.suptitle('模型组件消融实验结果', fontsize=16, y=1.05)
    plt.tight_layout()
    plt.savefig(filename, dpi=300, bbox_inches='tight')
    plt.close()

def plot_feature_ablation_comparison(feature_ablation_df, filename):
    """Plot the feature-ablation results as grouped bar charts.

    One sub-plot per metric (R方, MAE, Accuracy); bars grouped by stock.
    """
    plt.figure(figsize=(15, 8))

    # Note: the original also declared an unused `colors` list, removed here.
    metrics = ['R方', 'MAE', 'Accuracy']

    # One sub-plot per metric.
    for i, metric in enumerate(metrics):
        plt.subplot(1, 3, i + 1)
        sns.barplot(x='模型名称', y=metric, hue='股票名称', data=feature_ablation_df, palette='Set2')
        plt.title(f'{metric}对比', fontsize=12)
        plt.xticks(rotation=45)
        plt.xlabel('')
        if metric == 'R方':
            plt.ylim(0, 1)  # clamp axis; R² outside [0, 1] is uninformative here

    plt.suptitle('特征消融实验结果', fontsize=16, y=1.05)
    plt.tight_layout()
    plt.savefig(filename, dpi=300, bbox_inches='tight')
    plt.close()

# --------- 7. 主程序 ---------
def main():
    """Run the complete experiment pipeline.

    1) Trains ten models (8 deep, 2 classical) per bank stock and records
       regression + direction metrics; 2) runs a feature-ablation study for
       the two attention models; 3) writes CSVs, prediction plots, radar
       charts, comparison bar charts and a text summary into a timestamped
       output directory.
    """
    timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
    data_dir = f"stock_prediction_ablation_{timestamp}"
    os.makedirs(data_dir, exist_ok=True)

    all_metrics = []
    ablation_results = []  # NOTE(review): never appended to below — apparently vestigial
    feature_ablation_results = []

    # Builders for every model under comparison (SVM/GBDT take no input shape).
    models = {
        'CNN-LSTM-Attention': build_cnn_lstm_attention_model,
        'BiLSTM-Attention': build_bilstm_attention_model,
        'BiGRU-Attention': build_bigru_attention_model,
        'Transformer': build_transformer_model,
        'LSTM-Attention': build_lstm_attention_model,
        'CNN-LSTM': build_cnn_lstm_model,
        'LSTM-Only': build_lstm_only_model,
        'CNN-Only': build_cnn_only_model,
        'SVM': build_svm_model,
        'GBDT': build_gbdt_model
    }

    # Feature subsets exercised by the ablation study.
    feature_sets = {
        'all_features': ['open', 'high', 'low', 'close', 'vol', 'pct_chg', 'turnover_rate'],
        'no_volume': ['open', 'high', 'low', 'close', 'pct_chg'],
        'no_technical': ['open', 'high', 'low', 'close', 'vol'],
        'only_price': ['open', 'high', 'low', 'close'],
        'only_close': ['close']
    }

    for stock_name, stock_code in bank_stocks.items():
        # 1. Full model comparison on the complete feature set.
        print(f"\n===== 开始处理 {stock_name} =====")
        full_data = prepare_stock_data(stock_code, data_dir)

        for model_name, model_builder in models.items():
            print(f"正在训练模型: {model_name}")

            # Build and train (classical vs deep models take different inputs).
            if model_name in ['SVM', 'GBDT']:
                # Classical models see only the last day of each window.
                model = model_builder()
                model.fit(full_data['X_train_ml'], full_data['y_train'])

                # Reshape to a column so downstream flatten() treats both
                # branches identically.
                train_pred = model.predict(full_data['X_train_ml']).reshape(-1, 1)
                test_pred = model.predict(full_data['X_test_ml']).reshape(-1, 1)
            else:
                # Deep models consume the full (n_days, n_features) windows.
                model = model_builder((n_days, n_features))
                history = model.fit(
                    full_data['X_train'], full_data['y_train'],
                    epochs=epochs,
                    batch_size=batch_size,
                    validation_split=validation_split,
                    callbacks=[EarlyStopping(patience=10)],
                    verbose=0
                )

                train_pred = model.predict(full_data['X_train'])
                test_pred = model.predict(full_data['X_test'])

            # Map scaled close predictions back to price scale.
            close_index = full_data['feature_cols'].index('close')

            def inverse_transform(pred, y_true):
                # y_true is unused; kept for call-site symmetry. The scaler
                # expects all n_features columns, so pad with zeros and read
                # back only the close column.
                dummy = np.zeros((len(pred), n_features))
                dummy[:, close_index] = pred.flatten()
                return full_data['scaler'].inverse_transform(dummy)[:, close_index]

            train_pred_prices = inverse_transform(train_pred, full_data['y_train'])
            test_pred_prices = inverse_transform(test_pred, full_data['y_test'])
            train_true_prices = inverse_transform(full_data['y_train'], full_data['y_train'])
            test_true_prices = inverse_transform(full_data['y_test'], full_data['y_test'])

            # Score on the held-out test period only.
            metrics, _ = evaluate_model(test_true_prices, test_pred_prices, model_name, stock_name)
            all_metrics.append(metrics)

            # Persist per-model predictions (train + test, labeled).
            result_df = pd.DataFrame({
                '日期': np.concatenate([full_data['dates_train'], full_data['dates_test']]),
                '真实价格': np.concatenate([train_true_prices, test_true_prices]),
                '预测价格': np.concatenate([train_pred_prices, test_pred_prices]),
                '数据集': ['训练集'] * len(full_data['dates_train']) + ['测试集'] * len(full_data['dates_test'])
            })
            result_df.to_csv(f'{data_dir}/{stock_name}_{model_name}_results.csv',
                             index=False, encoding='utf_8_sig')

            # Prediction plot (confidence band + turning-point inset).
            plot_model_prediction(
                stock_name, model_name,
                full_data['dates_train'], train_pred_prices,
                full_data['dates_test'], test_pred_prices,
                train_true_prices, test_true_prices,
                metrics['MAE'],  # band half-width = test MAE
                f'{data_dir}/{stock_name}_{model_name}_prediction.png'
            )

        # 2. Feature-ablation study for the two attention models.
        for model_name in ['CNN-LSTM-Attention', 'LSTM-Attention']:
            print(f"\n===== 开始特征消融实验: {stock_name} - {model_name} =====")

            for feature_name, features in feature_sets.items():
                print(f"特征组合: {feature_name}")

                # Re-prepare data restricted to this feature subset.
                data = prepare_ablation_data(stock_code, features, data_dir)

                # Build/train the model sized for this subset's feature count.
                model = models[model_name]((n_days, len(features)))
                model.fit(
                    data['X_train'], data['y_train'],
                    epochs=epochs,
                    batch_size=batch_size,
                    validation_split=validation_split,
                    callbacks=[EarlyStopping(patience=10)],
                    verbose=0
                )

                test_pred = model.predict(data['X_test'])

                # De-normalize via the subset's own scaler.
                close_index = data['feature_cols'].index('close')

                def inverse_transform(pred, y_true):
                    # y_true unused; redefined each iteration so it closes
                    # over the current `data` and `features`.
                    dummy = np.zeros((len(pred), len(features)))
                    dummy[:, close_index] = pred.flatten()
                    return data['scaler'].inverse_transform(dummy)[:, close_index]

                test_pred_prices = inverse_transform(test_pred, data['y_test'])
                test_true_prices = inverse_transform(data['y_test'], data['y_test'])

                # Score this subset; the model label carries the subset name.
                metrics, _ = evaluate_model(test_true_prices, test_pred_prices,
                                            f"{model_name}_{feature_name}", stock_name)
                feature_ablation_results.append(metrics)

                # Persist per-subset predictions (test period only).
                result_df = pd.DataFrame({
                    '日期': data['dates_test'],
                    '真实价格': test_true_prices,
                    '预测价格': test_pred_prices
                })
                result_df.to_csv(f'{data_dir}/{stock_name}_{model_name}_{feature_name}_results.csv',
                                 index=False, encoding='utf_8_sig')

            # Radar chart over this stock/model's ablation runs.
            current_results = [r for r in feature_ablation_results
                               if r['模型名称'].startswith(model_name) and r['股票名称'] == stock_name]
            if current_results:
                plot_feature_ablation_radar(
                    pd.DataFrame(current_results),
                    model_name,
                    f'{data_dir}/{stock_name}_{model_name}_feature_ablation.png'
                )

    # Persist aggregated metrics.
    all_metrics_df = pd.DataFrame(all_metrics)
    all_metrics_df.to_csv(f'{data_dir}/all_metrics.csv', index=False, encoding='utf_8_sig')

    if feature_ablation_results:
        feature_ablation_df = pd.DataFrame(feature_ablation_results)
        feature_ablation_df.to_csv(f'{data_dir}/feature_ablation_results.csv',
                                   index=False, encoding='utf_8_sig')

    # Radar charts: raw metrics and the normalized (higher-is-better) view.
    plot_radar_chart(all_metrics_df, f'{data_dir}/radar_chart_original.png', normalized=False)
    normalized_metrics_df = normalize_metrics(all_metrics_df.copy())
    plot_radar_chart(normalized_metrics_df, f'{data_dir}/radar_chart_normalized.png', normalized=True)

    # Per-metric comparison bar charts.
    metrics_to_plot = ['R方', 'MAE', 'Accuracy', 'F1', '相关系数']
    for metric in metrics_to_plot:
        plot_comparison(all_metrics_df, metric, f'{data_dir}/comparison_{metric}.png')

    # Model-component ablation chart.
    plot_model_ablation(all_metrics_df, f'{data_dir}/model_ablation_comparison.png')

    # Feature-ablation comparison chart.
    if feature_ablation_results:
        plot_feature_ablation_comparison(feature_ablation_df, f'{data_dir}/feature_ablation_comparison.png')

    # Write a human-readable summary of the experiment design.
    with open(f'{data_dir}/实验说明.txt', 'w', encoding='utf-8') as f:
        f.write("""实验说明：

1. 模型组件消融实验：
- CNN-LSTM-Attention: 完整模型(CNN+LSTM+Attention)
- LSTM-Attention: 仅LSTM+Attention
- CNN-LSTM: CNN+LSTM(无Attention)
- LSTM-Only: 仅LSTM层
- CNN-Only: 仅CNN层

2. 特征消融实验：
- all_features: 全部7个特征
- no_volume: 去除成交量相关特征(vol, turnover_rate)
- no_technical: 去除技术指标(pct_chg, turnover_rate)
- only_price: 仅价格特征(open, high, low, close)
- only_close: 仅收盘价

3. 评估指标说明：
- R方: 越接近1越好
- MAE: 越小越好
- Accuracy: 涨跌方向预测准确率
- Precision: 准确预测上涨的比例
- Recall: 正确识别实际上涨的能力
- F1: Precision和Recall的调和平均

4. 包含模型：
- 深度学习模型: CNN-LSTM-Attention, BiLSTM-Attention, BiGRU-Attention, Transformer
- 消融模型: LSTM-Attention, CNN-LSTM, LSTM-Only, CNN-Only
- 传统模型: SVM, GBDT

5. 可视化优化：
- 预测对比图新增2024Q1-Q2关键转折点放大镜
- 测试集预测添加±1个MAE置信区间
- 特征消融雷达图突出显示与基准模型的差异区域
- 新增各模型性能对比柱状图
- 新增模型组件消融柱状图
- 时间轴优化显示（按季度显示刻度）
- 错误处理增强（空数据保护）

6. 数据保存说明：
- 原始数据保存在 raw_data 文件夹中
- 模型预测结果保存在各模型对应的CSV文件中
- 评估指标保存在 all_metrics.csv 和 feature_ablation_results.csv 中

7. 关键参数配置：
- 历史天数: 10
- 预测天数: 1
- 训练集比例: 80%
- 数据范围: 2019-01-01至2024-06-30
- 关键转折点分析期: 2024Q1-Q2
""")

    print("\n======= 分析完成 =======")
    print(f"结果已保存到目录: {data_dir}")
    print(f"原始数据保存在: {data_dir}/raw_data/")

# Run the full pipeline only when executed as a script (not on import).
if __name__ == "__main__":
    main()