import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, GRU, Dropout, BatchNormalization, Bidirectional, LSTM
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau
import talib
import warnings

# Silence library warnings (keeps the training log readable)
warnings.filterwarnings('ignore')

# Fix random seeds so runs are reproducible
np.random.seed(42)
tf.random.set_seed(42)

def add_technical_indicators(df):
    """Add technical-indicator feature columns to ``df``, tolerating bad data.

    Basic rolling/statistical features are always computed; TA-Lib based
    indicators (MACD, RSI, Bollinger Bands, ...) are added only when the
    ``talib`` package is importable. NaNs produced by the rolling windows
    are back-/forward-filled and finally zero-filled.

    Args:
        df: DataFrame with at least ``open``/``high``/``low``/``close``/``vol``
            columns.

    Returns:
        The DataFrame with the new feature columns appended.
    """
    # Warn when there is too little history for the longer windows (e.g. MA20).
    if len(df) < 50:
        print(f"警告: 数据量较少 ({len(df)} 行), 某些指标可能无法计算")

    # Coerce price/volume columns to numeric and repair missing values.
    numeric_cols = ['open', 'high', 'low', 'close', 'vol']
    for col in numeric_cols:
        try:
            df[col] = pd.to_numeric(df[col], errors='coerce')
            # ffill()/bfill() replace the deprecated fillna(method=...) form;
            # assigning back avoids chained-assignment, which silently does
            # nothing under pandas copy-on-write.
            df[col] = df[col].ffill().bfill()
            df[col] = df[col].fillna(df[col].mean())
        except Exception as e:
            print(f"转换 {col} 为数值类型时出错: {str(e)}")
            # Last resort: synthesize data so downstream code keeps working.
            df[col] = np.random.uniform(10, 100, len(df))

    # Plain numpy arrays for the TA-Lib calls below.
    high_price = df['high'].values
    low_price = df['low'].values
    close_price = df['close'].values
    volume = df['vol'].values

    # --- Basic statistical features (no TA-Lib dependency) ---
    # Simple moving averages.
    df['ma5'] = df['close'].rolling(window=5).mean()
    df['ma10'] = df['close'].rolling(window=10).mean()
    df['ma20'] = df['close'].rolling(window=20).mean()

    # Daily percentage change.
    df['pct_change'] = df['close'].pct_change()

    # Distance between the close and its moving averages.
    df['close_ma5_diff'] = df['close'] - df['ma5']
    df['close_ma10_diff'] = df['close'] - df['ma10']

    # Volatility proxies.
    df['vol_change'] = df['vol'].pct_change()
    df['price_range'] = (df['high'] - df['low']) / df['close']

    try:
        # Richer indicators when talib is installed. NOTE(review): the module
        # also imports talib at the top of the file, which defeats this
        # fallback unless that import is made optional too.
        import talib

        # MACD
        macd, macd_signal, macd_hist = talib.MACD(close_price)
        df['macd'] = macd
        df['macd_signal'] = macd_signal
        df['macd_hist'] = macd_hist

        # RSI
        df['rsi14'] = talib.RSI(close_price, timeperiod=14)

        # Bollinger Bands
        upper, middle, lower = talib.BBANDS(close_price, timeperiod=20)
        df['bb_upper'] = upper
        df['bb_middle'] = middle
        df['bb_lower'] = lower

        # Stochastic oscillator
        slowk, slowd = talib.STOCH(high_price, low_price, close_price)
        df['stoch_k'] = slowk
        df['stoch_d'] = slowd

        # Rate of change
        df['roc'] = talib.ROC(close_price, timeperiod=10)

        # Average true range
        df['atr'] = talib.ATR(high_price, low_price, close_price, timeperiod=14)

        # On-balance volume
        df['obv'] = talib.OBV(close_price, volume)

    except (ImportError, NameError) as e:
        print(f"talib不可用，只计算基本指标: {str(e)}")
    except Exception as e:
        print(f"计算技术指标时出错: {str(e)}")

    # Fill NaNs from rolling-window warm-up periods (bfill first so the
    # leading rows get values), then zero-fill anything still missing.
    df = df.bfill().ffill().fillna(0)

    return df

def load_all_data(data_dir):
    """Load every CSV in ``data_dir``, normalize columns and add indicators.

    Each file is treated as one stock (the code is taken from the file name).
    Missing or oddly-named columns are repaired heuristically so the pipeline
    can keep running on imperfect exports.

    Args:
        data_dir: Directory containing one CSV file per stock.

    Returns:
        Tuple ``(merged_data, stock_codes, features)``: the concatenation of
        all per-stock frames, the codes that were successfully processed, and
        the candidate feature column names.

    Raises:
        ValueError: When no CSV files exist or none could be processed.
    """
    all_data = []
    stock_codes = []

    csv_files = [os.path.join(data_dir, f) for f in os.listdir(data_dir) if f.endswith('.csv')]
    if not csv_files:
        raise ValueError(f"在 {data_dir} 中未找到CSV文件")

    print(f"找到 {len(csv_files)} 个CSV文件: {[os.path.basename(f) for f in csv_files]}")

    for file_path in csv_files:
        stock_code = os.path.basename(file_path).split('.')[0]

        df = pd.read_csv(file_path)

        # Show the raw column names to ease debugging of odd exports.
        print(f"文件 {os.path.basename(file_path)} 的列名: {df.columns.tolist()}")

        if len(df) < 30:
            print(f"警告: {stock_code} 数据量较少 ({len(df)} 行), 可能影响模型性能")

        # Guarantee a 'trade_date' column, repairing common naming issues.
        if 'trade_date' not in df.columns:
            if 'trade_date' in [col.strip() for col in df.columns]:
                # Column exists but carries stray whitespace.
                col_with_space = [col for col in df.columns if col.strip() == 'trade_date'][0]
                df.rename(columns={col_with_space: 'trade_date'}, inplace=True)
            else:
                # Fall back to any column whose name mentions a date.
                date_cols = [col for col in df.columns if 'date' in col.lower()]
                if date_cols:
                    df.rename(columns={date_cols[0]: 'trade_date'}, inplace=True)
                    print(f"将列 '{date_cols[0]}' 重命名为 'trade_date'")
                else:
                    # No date column at all: fabricate a sequential one.
                    print(f"警告: {os.path.basename(file_path)} 中未找到日期列，创建顺序日期")
                    df['trade_date'] = pd.date_range(start='2025-01-01', periods=len(df))

        # Parse the date column and sort chronologically.
        try:
            if pd.api.types.is_numeric_dtype(df['trade_date']):
                # Numeric form such as 20250326.
                df['trade_date'] = pd.to_datetime(df['trade_date'], format='%Y%m%d', errors='coerce')
            else:
                # Let pandas infer the format.
                df['trade_date'] = pd.to_datetime(df['trade_date'], errors='coerce')

            if df['trade_date'].isna().any():
                print(f"警告: {os.path.basename(file_path)} 中存在无法解析的日期")
                # ffill()/bfill() replace the deprecated fillna(method=...) form.
                df['trade_date'] = df['trade_date'].ffill()
                df['trade_date'] = df['trade_date'].bfill()

            df = df.sort_values('trade_date')
        except Exception as e:
            print(f"日期转换错误 - {os.path.basename(file_path)}: {str(e)}")
            # Fabricate dates so processing can continue.
            df['trade_date'] = pd.date_range(start='2025-01-01', periods=len(df))
            df = df.sort_values('trade_date')

        # Guarantee the OHLCV columns, creating placeholders when missing.
        required_cols = ['open', 'high', 'low', 'close', 'vol']
        for col in required_cols:
            if col not in df.columns:
                # Look for a similarly named column first.
                similar_cols = [c for c in df.columns if col.lower() in c.lower()]
                if similar_cols:
                    df.rename(columns={similar_cols[0]: col}, inplace=True)
                    print(f"将列 '{similar_cols[0]}' 重命名为 '{col}'")
                else:
                    if col == 'vol':
                        df[col] = np.ones(len(df)) * 1000  # assumed placeholder volume
                    else:
                        # Reuse an existing price column, else synthesize one.
                        existing_price_cols = [c for c in df.columns if c in ['open', 'high', 'low', 'close']]
                        if existing_price_cols:
                            df[col] = df[existing_price_cols[0]]
                        else:
                            df[col] = np.random.uniform(10, 100, len(df))
                    print(f"警告: 创建了假 '{col}' 列")

        # Tag every row with its stock code.
        df['stock_code'] = stock_code

        # Add technical indicators; skip the file entirely on failure.
        try:
            df = add_technical_indicators(df)
        except Exception as e:
            print(f"添加技术指标失败 - {os.path.basename(file_path)}: {str(e)}")
            continue

        all_data.append(df)
        # Record the code only after the file was processed successfully, so
        # stock_codes never lists data that was skipped above.
        stock_codes.append(stock_code)

    if not all_data:
        raise ValueError("处理完所有CSV文件后没有可用数据")

    # Merge all per-stock frames.
    merged_data = pd.concat(all_data, ignore_index=True)
    print(f"合并后数据形状: {merged_data.shape}")

    # Everything except identifiers/dates is a candidate feature.
    features = [col for col in merged_data.columns if col not in ['trade_date', 'stock_code', 'ts_code']]
    print(f"使用的特征 ({len(features)}): {features}")

    return merged_data, stock_codes, features

def create_sequences(data, features, target_col, seq_length):
    """Build per-stock sliding-window samples for sequence models.

    Within each ``stock_code`` group, the feature matrix is sliced into
    windows of ``seq_length`` rows, each paired with the target value of the
    row immediately after the window.

    Args:
        data: DataFrame containing ``stock_code`` plus the feature columns.
        features: Names of the numeric input feature columns.
        target_col: Name of the column to predict.
        seq_length: Number of time steps per input window.

    Returns:
        Tuple ``(X, y)`` of numpy arrays shaped
        ``(samples, seq_length, n_features)`` and ``(samples, 1)``.
    """
    # Debug aid: preview the incoming frame.
    print(data.head())

    # Order rows chronologically inside each stock when a date is available.
    if 'trade_date' in data.columns:
        ordered = data.sort_values(['stock_code', 'trade_date'])
    else:
        print("警告: 输入数据中没有'trade_date'列，仅按'stock_code'排序")
        ordered = data.sort_values('stock_code')

    windows, targets = [], []
    for _, group in ordered.groupby('stock_code'):
        # Numeric-only feature matrix and column-vector targets.
        feats = group[features].values
        labels = group[target_col].values.reshape(-1, 1)

        # One window per position that still has a follow-up target row.
        for start in range(len(feats) - seq_length):
            end = start + seq_length
            windows.append(feats[start:end])
            targets.append(labels[end])

    return np.array(windows), np.array(targets)

def build_advanced_model(seq_length, n_features):
    """Build and compile the hybrid bidirectional-GRU / LSTM / GRU network.

    Args:
        seq_length: Number of time steps per input sequence.
        n_features: Number of features per time step.

    Returns:
        A compiled ``tf.keras`` ``Sequential`` model that outputs a single
        regression value per sequence.
    """
    model = Sequential([
        # Layer 1: bidirectional GRU with L2 weight regularization.
        Bidirectional(GRU(units=64, return_sequences=True,
                         kernel_regularizer=tf.keras.regularizers.l2(0.001)),
                     input_shape=(seq_length, n_features)),
        BatchNormalization(),
        Dropout(0.3),

        # Layer 2: LSTM, still returning the full sequence.
        LSTM(units=64, return_sequences=True),
        BatchNormalization(),
        Dropout(0.3),

        # Layer 3: GRU collapsing the time dimension.
        GRU(units=32),
        BatchNormalization(),
        Dropout(0.3),

        # Dense head producing the single predicted value.
        Dense(units=16, activation='relu'),
        Dense(units=1)
    ])

    # Small learning rate for stability on noisy financial data.
    optimizer = Adam(learning_rate=0.0005)
    model.compile(optimizer=optimizer, loss=tf.keras.losses.MeanSquaredError())

    # summary() prints its table itself and returns None, so calling it
    # directly avoids the stray "None" the old print(model.summary()) emitted.
    model.summary()
    return model

def train_model(data, features, target_col='close', seq_length=20, epochs=100, batch_size=32):
    """Train one GRU-LSTM model on the pooled data of all stocks.

    Args:
        data: Merged DataFrame containing the feature columns, ``target_col``
            and ``stock_code`` (optionally ``trade_date``).
        features: Candidate feature column names; non-numeric ones are skipped.
        target_col: Column to predict.
        seq_length: Time steps per input sequence.
        epochs: Maximum training epochs (early stopping may end sooner).
        batch_size: Mini-batch size.

    Returns:
        Tuple ``(model, feature_scaler, target_scaler, metrics, history)``
        where ``metrics`` holds RMSE/MAE/R² and the perturbation-based
        feature importances.

    Raises:
        ValueError: When no numeric feature or a non-numeric target is given.
    """
    # Keep only numeric features; the network cannot consume strings/dates.
    numeric_features = []
    for feature in features:
        if pd.api.types.is_numeric_dtype(data[feature]):
            numeric_features.append(feature)
        else:
            print(f"警告: 跳过非数值型特征 '{feature}'")

    if len(numeric_features) == 0:
        raise ValueError("没有可用的数值型特征进行训练")

    # The target must be numeric even when it is not used as an input feature.
    if target_col not in numeric_features and not pd.api.types.is_numeric_dtype(data[target_col]):
        raise ValueError(f"目标列 '{target_col}' 不是数值型")

    print(f"使用 {len(numeric_features)} 个数值型特征: {numeric_features}")

    # Extract feature matrix and target column vector.
    X_data = data[numeric_features].values
    y_data = data[target_col].values.reshape(-1, 1)

    # Independent scalers: the target scaler is fitted on exactly one column,
    # so predictions can be inverse-transformed directly later on.
    feature_scaler = MinMaxScaler()
    target_scaler = MinMaxScaler()

    X_scaled = feature_scaler.fit_transform(X_data)
    y_scaled = target_scaler.fit_transform(y_data)

    # Assemble the scaled data for sequence construction. The target is kept
    # under a reserved name so it can never clobber a feature column that
    # happens to share the target's name (e.g. 'close').
    processed_data = pd.DataFrame(X_scaled, columns=numeric_features)
    processed_data['__target__'] = y_scaled
    processed_data['stock_code'] = data['stock_code'].values

    # Carry the trade date through when available (used for ordering).
    if 'trade_date' in data.columns:
        processed_data['trade_date'] = data['trade_date'].values
    else:
        print("警告: 原始数据中没有'trade_date'列，将使用索引替代")

    # Build the sliding-window training samples.
    X, y = create_sequences(processed_data, numeric_features, target_col='__target__', seq_length=seq_length)

    print(f"序列数据形状: X={X.shape}, y={y.shape}")

    # Chronology-preserving 80/20 split (no shuffling of time series).
    train_size = int(len(X) * 0.8)
    X_train, X_test = X[:train_size], X[train_size:]
    y_train, y_test = y[:train_size], y[train_size:]

    # Build and train the model.
    model = build_advanced_model(seq_length, len(numeric_features))

    # Early stopping + learning-rate decay on validation-loss plateaus.
    callbacks = [
        EarlyStopping(monitor='val_loss', patience=20, restore_best_weights=True),
        ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=10, min_lr=0.00001)
    ]

    history = model.fit(
        X_train, y_train,
        epochs=epochs,
        batch_size=batch_size,
        validation_split=0.2,
        callbacks=callbacks,
        verbose=1
    )

    # Predict on the held-out split.
    y_pred = model.predict(X_test)

    # Inverse-scale directly: target_scaler was fitted on a single column, so
    # it inverts (n, 1) arrays as-is. (The previous padded-dummy approach fed
    # it len(numeric_features) columns, which the scaler would reject.)
    y_test_inv = target_scaler.inverse_transform(y_test).flatten()
    y_pred_inv = target_scaler.inverse_transform(y_pred).flatten()

    # Evaluation metrics on the original price scale.
    mse = mean_squared_error(y_test_inv, y_pred_inv)
    rmse = np.sqrt(mse)
    mae = mean_absolute_error(y_test_inv, y_pred_inv)
    r2 = r2_score(y_test_inv, y_pred_inv)

    print(f"评估指标:")
    print(f"  RMSE: {rmse:.4f}")
    print(f"  MAE: {mae:.4f}")
    print(f"  R²: {r2:.4f}")

    # Overall prediction vs. truth on the test split.
    plt.figure(figsize=(14, 7))
    plt.subplot(2, 1, 1)
    plt.plot(y_test_inv, label='really', color='blue')
    plt.plot(y_pred_inv, label='predview', color='red')
    plt.title(f'GRU-LSTM模型总体预测结果 (RMSE: {rmse:.4f})')
    plt.xlabel('index')
    plt.ylabel('volume')
    plt.legend()

    # Zoom into the last 100 points for a clearer comparison.
    plt.subplot(2, 1, 2)
    last_n = min(100, len(y_test_inv))
    plt.plot(y_test_inv[-last_n:], label='really', color='blue')
    plt.plot(y_pred_inv[-last_n:], label='predview', color='red')
    plt.title(f'最后{last_n}个样本点预测结果对比')
    plt.xlabel('index')
    plt.ylabel('volume')
    plt.legend()
    plt.tight_layout()

    # Ensure the output directory exists before saving figures.
    img_dir = 'prediction_images'
    os.makedirs(img_dir, exist_ok=True)
    plt.savefig(f'{img_dir}/overall_prediction.png')
    plt.close()

    # Training/validation loss curves.
    plt.figure(figsize=(10, 6))
    plt.plot(history.history['loss'], label='train_loss')
    plt.plot(history.history['val_loss'], label='verify_loss')
    plt.title('模型训练过程损失曲线')
    plt.xlabel('Epochs')
    plt.ylabel('MSE')
    plt.legend()
    plt.grid(True)
    plt.savefig(f'{img_dir}/training_loss.png')
    plt.close()

    # Feature importance via permutation: shuffle one feature across test
    # samples and measure how much the test MSE grows.
    print("计算特征重要性...")
    feature_importance = {}

    baseline_mse = mse
    for i, feature in enumerate(numeric_features):
        # Copy so the real test set is never modified.
        X_test_perturbed = X_test.copy()

        # Shuffle this feature across samples (axis 0) to break its signal.
        np.random.shuffle(X_test_perturbed[:, :, i])

        # Predict with the perturbed feature and inverse-scale directly.
        y_pred_perturbed = model.predict(X_test_perturbed)
        y_pred_perturbed_inv = target_scaler.inverse_transform(y_pred_perturbed).flatten()

        # MSE after perturbation.
        perturbed_mse = mean_squared_error(y_test_inv, y_pred_perturbed_inv)

        # Importance = relative MSE increase; negative values clamp to 0.
        importance = (perturbed_mse - baseline_mse) / baseline_mse * 100
        feature_importance[feature] = max(0, importance)

    # Plot importances, most important first.
    plt.figure(figsize=(14, 6))
    sorted_features = sorted(feature_importance.items(), key=lambda x: x[1], reverse=True)
    feature_names = [x[0] for x in sorted_features]
    importance_values = [x[1] for x in sorted_features]

    plt.barh(feature_names, importance_values)
    plt.title('特征重要性分析 (基于扰动测试)')
    plt.xlabel('重要性分数 (%MSE增加)')
    plt.tight_layout()
    plt.savefig(f'{img_dir}/feature_importance.png')
    plt.close()

    # Report the top-5 most important features.
    top_n = min(5, len(sorted_features))
    print(f"前{top_n}个最重要的特征:")
    for feature, importance in sorted_features[:top_n]:
        print(f"  {feature}: {importance:.2f}%")

    return model, feature_scaler, target_scaler, {
        'rmse': rmse,
        'mae': mae,
        'r2': r2,
        'feature_importance': feature_importance
    }, history

def main():
    # 获取并合并所有CSV数据
    data_dir = os.path.join('tushare_data')
    if not os.path.exists(data_dir):
        print(f"错误: 未找到数据目录 {data_dir}")
        return

    try:
        # 加载并合并所有数据
        all_data, stock_codes, features = load_all_data(data_dir)

        # 排除非数值特征和不需要的特征列
        valid_features = []
        for feature in features:
            if feature != 'close' and pd.api.types.is_numeric_dtype(all_data[feature]):
                valid_features.append(feature)
            elif not pd.api.types.is_numeric_dtype(all_data[feature]):
                print(f"跳过非数值特征: {feature}")

        print(f"使用 {len(valid_features)} 个有效特征")

        # 训练单个模型
        model, feature_scaler, target_scaler, metrics, history = train_model(
            all_data,
            features=valid_features,
            target_col='close',
            seq_length=20,  # 增加序列长度以捕获更长期的模式
            epochs=200,
            batch_size=64   # 增加批次大小以提高训练稳定性
        )

        # 保存模型和缩放器
        models_dir = 'saved_models'
        os.makedirs(models_dir, exist_ok=True)

        model.save(f'{models_dir}/gru_lstm_model.keras')
        print(f"模型已保存到 {models_dir}/gru_lstm_model.keras")

        # 保存结果到CSV
        top_features = dict(sorted(metrics['feature_importance'].items(),
                                   key=lambda x: x[1], reverse=True)[:5])
        top_features_str = ', '.join([f"{k}: {v:.2f}%" for k, v in top_features.items()])

        results = {
            '模型': 'GRU-LSTM混合模型',
            'RMSE': metrics['rmse'],
            'MAE': metrics['mae'],
            'R²': metrics['r2'],
            '训练样本数': len(all_data),
            '特征数量': len(valid_features),
            '数量': len(stock_codes),
            '列表': ','.join(stock_codes),
            '最重要特征': top_features_str
        }

        results_df = pd.DataFrame([results])
        results_df.to_csv('gru_results.csv', index=False)
        print(f"结果已保存到 gru_results.csv: \n{results_df}")

        # 显示最终的损失图表
        plt.figure(figsize=(12, 6))
        plt.plot(history.history['loss'], label='train_loss')
        plt.plot(history.history['val_loss'], label='verify_loss')
        plt.title('GRU-LSTM模型训练过程')
        plt.xlabel('Epochs')
        plt.ylabel('loss')
        plt.legend()
        plt.grid(True)
        plt.show()

        # 打印优化建议
        print("\n=== 股价预测模型优化建议 ===")
        print("1. 数据相关:")
        print("   - 增加历史数据长度，至少需要2-3年的数据才能捕获季节性模式")
        print("   - 考虑添加外部因素（如大盘指数、行业指数、宏观经济指标等）")
        print("   - 增加更多数据进行联合训练，提高模型泛化能力")

        print("\n2. 特征工程:")
        print("   - 关注特征重要性分析，移除不重要的特征")
        print("   - 考虑添加基本面数据（市盈率、市净率等）")
        print("   - 尝试不同的技术指标组合和参数设置")

        print("\n3. 模型架构:")
        print("   - 尝试更深层次的网络或Transformer架构")
        print("   - 考虑加入注意力机制以关注关键时间点")
        print("   - 实现集成学习方法，融合多个模型的预测结果")

        print("\n4. 训练策略:")
        print("   - 尝试不同的损失函数（如Huber损失）以减少异常值影响")
        print("   - 探索更复杂的学习率调度策略")
        print("   - 考虑使用Bayesian优化自动调整超参数")

        print("\n5. 预测框架:")
        print("   - 改为多步预测而非单步预测")
        print("   - 尝试分类+回归混合模型（预测方向+幅度）")
        print("   - 实现概率预测以量化不确定性")

    except Exception as e:
        print(f"处理过程中出错: {str(e)}")
        import traceback
        traceback.print_exc()

if __name__ == "__main__":
    main()
