import os
import tushare as ts
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
from sklearn.utils import resample
import warnings

# Global configuration: silence noisy warnings and set plotting defaults.
warnings.filterwarnings('ignore')
try:
    plt.style.use('seaborn')
except OSError:
    # The 'seaborn' style was renamed in matplotlib >= 3.6; fall back to the
    # new name so the script keeps working on modern matplotlib.
    plt.style.use('seaborn-v0_8')
sns.set_palette('husl')
plt.rcParams['font.sans-serif'] = ['SimHei']  # render Chinese axis labels correctly
# SECURITY NOTE(review): avoid committing API tokens to source control.
# Prefer the TUSHARE_TOKEN environment variable; the hard-coded token is kept
# only as a backward-compatible fallback and should be rotated.
ts.set_token(os.environ.get(
    'TUSHARE_TOKEN',
    '1c7f85b9026518588c0d0cdac712c2d17344332c9c8cfe6bc83ee75c'
))
pro = ts.pro_api()
# 1. 数据获取模块
def get_stock_data():
    """Download roughly two years of daily bars for at least 10 A-share stocks.

    Candidate tickers come from the CSI 300 index weights (with a fallback
    endpoint and a hard-coded list as last resort); a ticker is kept only if
    it has enough history (>= 400 rows, about two trading years).

    Returns:
        dict: mapping ts_code -> daily-quote DataFrame from Tushare.
    """
    today = pd.to_datetime('today').strftime('%Y%m%d')

    try:
        # Primary source: CSI 300 index weights; keep the latest snapshot.
        weights = pro.index_weight(index_code='000300.SH',
                                   start_date='20200101',
                                   end_date=today)
        newest = weights['trade_date'].max()
        stock_codes = weights[weights['trade_date'] == newest]['con_code'].unique().tolist()

        # Secondary source if the weight table came back empty.
        if not stock_codes:
            stock_codes = pro.hs_const(hs_type='SH')['ts_code'].tolist()

        # Keep a few spares in case some tickers lack complete data.
        stock_codes = stock_codes[:15]
    except Exception as e:
        print(f"获取成分股失败: {e}")
        # Last resort: a fixed list of liquid large caps.
        stock_codes = [
            '600519.SH', '000858.SZ', '601318.SH',
            '600036.SH', '000333.SZ', '601888.SH',
            '600276.SH', '600887.SH', '601398.SH',
            '601288.SH', '601988.SH', '601628.SH',
            '601668.SH', '601166.SH', '600030.SH'
        ]

    # Two-year window ending today.
    end_date = today
    start_date = (pd.to_datetime(end_date) - pd.DateOffset(years=2)).strftime('%Y%m%d')

    stock_data = {}
    for code in stock_codes:
        try:
            df = pro.daily(ts_code=code, start_date=start_date, end_date=end_date)
            if len(df) >= 400:  # roughly two years of trading days
                stock_data[code] = df
                print(f"成功获取 {code} 数据，共 {len(df)} 条记录")
            if len(stock_data) >= 10:  # stop once ten stocks are collected
                break
        except Exception as e:
            print(f"获取 {code} 数据失败: {e}")

    return stock_data


# 2. 数据预处理与技术指标计算模块
def preprocess_and_calculate_indicators(stock_data):
    """Preprocess raw daily bars and derive technical indicators per stock.

    Each input frame is copied before modification, so the caller's raw data
    stays intact (the previous version mutated the input frames in place).

    Args:
        stock_data: dict mapping ts_code -> raw daily-bar DataFrame with at
            least 'trade_date' (YYYYMMDD strings), 'close', 'high', 'low'
            and 'vol' columns.

    Returns:
        dict mapping ts_code -> DataFrame indexed by trade date, enriched
        with indicator columns and with indicator warm-up rows (leading NaN
        windows) dropped.
    """
    processed_data = {}

    for code, df in stock_data.items():
        try:
            # Work on a copy so the caller's DataFrame is not mutated.
            df = df.copy()

            # Parse the date strings, index by trade date, sort ascending.
            df['trade_date'] = pd.to_datetime(df['trade_date'], format='%Y%m%d')
            df.set_index('trade_date', inplace=True)
            df.sort_index(inplace=True)

            # Daily simple return.
            df['return'] = df['close'].pct_change()

            # 1. Simple moving averages over common windows.
            for window in (5, 10, 20, 60):
                df[f'ma{window}'] = df['close'].rolling(window).mean()

            # 2. Bollinger bands (20-day mean +/- 2 standard deviations).
            df['middle_band'] = df['close'].rolling(20).mean()
            band = 2 * df['close'].rolling(20).std()
            df['upper_band'] = df['middle_band'] + band
            df['lower_band'] = df['middle_band'] - band

            # 3. RSI (14-day relative strength index, simple-mean variant).
            delta = df['close'].diff()
            gain = delta.where(delta > 0, 0).rolling(14).mean()
            loss = (-delta.where(delta < 0, 0)).rolling(14).mean()
            rs = gain / loss
            df['rsi'] = 100 - (100 / (1 + rs))

            # 4. MACD (12/26 EMA difference) and its 9-day signal line.
            exp12 = df['close'].ewm(span=12, adjust=False).mean()
            exp26 = df['close'].ewm(span=26, adjust=False).mean()
            df['macd'] = exp12 - exp26
            df['macd_signal'] = df['macd'].ewm(span=9, adjust=False).mean()

            # 5. Stochastic oscillator (%K over 14 days; %D = 3-day mean of %K).
            low_min = df['low'].rolling(14).min()
            high_max = df['high'].rolling(14).max()
            df['stoch_k'] = 100 * (df['close'] - low_min) / (high_max - low_min)
            df['stoch_d'] = df['stoch_k'].rolling(3).mean()

            # 6. ATR (14-day average true range).
            df['high_low'] = df['high'] - df['low']
            df['high_close'] = np.abs(df['high'] - df['close'].shift())
            df['low_close'] = np.abs(df['low'] - df['close'].shift())
            df['true_range'] = df[['high_low', 'high_close', 'low_close']].max(axis=1)
            df['atr'] = df['true_range'].rolling(14).mean()

            # 7. OBV (on-balance volume): volume signed by price change, accumulated.
            df['obv'] = (np.sign(df['close'].diff()) * df['vol']).fillna(0).cumsum()

            # Drop warm-up rows where any rolling window produced NaN.
            df.dropna(inplace=True)

            processed_data[code] = df
            print(f"成功处理 {code} 数据，剩余 {len(df)} 条记录")
        except Exception as e:
            print(f"处理 {code} 数据时出错: {e}")

    return processed_data


# 3. 数据标签分类模块
def label_data(processed_data):
    """Attach a 3-class label to each stock's frame based on forward returns.

    The label is the tercile of the cumulative return over the next 5
    trading days: -1 (down), 0 (flat), 1 (up). Input frames are copied, not
    mutated (the previous version wrote the 'label' column into the caller's
    DataFrame in place).

    Args:
        processed_data: dict mapping ts_code -> indicator DataFrame with a
            'return' column.

    Returns:
        dict mapping ts_code -> labeled DataFrame; the last 5 rows are
        dropped because their future window is incomplete.
    """
    labeled_data = {}

    for code, df in processed_data.items():
        try:
            # Copy so the caller's frame is left untouched.
            df = df.copy()

            # Cumulative return over the NEXT 5 trading days.
            future_return = df['return'].rolling(5).sum().shift(-5)

            # Tercile buckets: -1 (down), 0 (flat), 1 (up).
            df['label'] = pd.qcut(future_return, q=3, labels=[-1, 0, 1])

            # The trailing 5 rows have no complete future window; drop them.
            df = df.iloc[:-5]

            labeled_data[code] = df
            print(f"成功为 {code} 数据打标签，标签分布:\n{df['label'].value_counts()}")
        except Exception as e:
            print(f"为 {code} 数据打标签时出错: {e}")

    return labeled_data


# 4. 数据合并与分析模块
def merge_and_analyze_data(labeled_data):
    """Stack the per-stock frames into one DataFrame and print a summary.

    A 'stock_code' column (first column) identifies the originating stock
    for every row; the date index is preserved.

    Args:
        labeled_data: dict mapping ts_code -> labeled DataFrame.

    Returns:
        pd.DataFrame: the concatenated data with a 'stock_code' column.
    """
    codes = list(labeled_data.keys())
    merged = pd.concat([labeled_data[c] for c in codes], keys=codes)
    merged = merged.reset_index(level=0).rename(columns={'level_0': 'stock_code'})

    # Quick overview on stdout.
    print("\n合并后数据概览:")
    print(f"总记录数: {len(merged)}")
    print(f"特征数量: {len(merged.columns)}")
    print("标签分布:")
    print(merged['label'].value_counts(normalize=True))

    return merged


# 5. 数据清洗与特征工程模块
def clean_and_engineer_features(all_data):
    """Build the model feature matrix: winsorize, scale, and encode stocks.

    Args:
        all_data: merged DataFrame with indicator columns, 'stock_code' and
            'label'.

    Returns:
        tuple (features, labels, selected_features):
            features - numeric indicators (3-sigma clipped, min-max scaled
                       to [0, 1]) plus one-hot stock-code columns
            labels   - the 'label' column
            selected_features - ordered names of every feature column
    """
    # Candidate technical indicators; keep only those actually present.
    indicator_names = [
        'return', 'ma5', 'ma10', 'ma20', 'ma60',
        'middle_band', 'upper_band', 'lower_band',
        'rsi', 'macd', 'macd_signal',
        'stoch_k', 'stoch_d', 'atr', 'obv'
    ]
    numeric_cols = [name for name in indicator_names if name in all_data.columns]

    # One-hot encode the stock identity so the model can learn per-stock offsets.
    one_hot = pd.get_dummies(all_data['stock_code'], prefix='stock')

    # Indicators first, dummies after.
    features = pd.concat([all_data[numeric_cols], one_hot], axis=1)
    labels = all_data['label']
    selected_features = numeric_cols + one_hot.columns.tolist()

    # Winsorize each numeric column at mean +/- 3 standard deviations.
    for name in numeric_cols:
        center = features[name].mean()
        spread = features[name].std()
        upper = center + 3 * spread
        lower = center - 3 * spread
        features[name] = np.where(
            features[name] > upper,
            upper,
            np.where(features[name] < lower, lower, features[name])
        )

    # Min-max scale the numeric indicators into [0, 1].
    features[numeric_cols] = MinMaxScaler().fit_transform(features[numeric_cols])

    # Fill any remaining gaps with column means.
    features.fillna(features.mean(), inplace=True)

    return features, labels, selected_features


# 6. 数据分析与可视化模块
def analyze_and_visualize(features, labels, selected_features):
    """
    Exploratory analysis and visualization; all figures are saved to images/.

    Parameters:
        features - feature DataFrame (numeric indicators + one-hot stock columns)
        labels - label Series aligned row-for-row with features
        selected_features - names of the columns used as model features

    Side effects:
        Writes label_distribution.png, correlation_matrix.png,
        pca_visualization.png, pca_explained_variance.png and
        stock_profile.png into the images/ directory.
    """
    # Make sure the output directory for the figures exists.
    os.makedirs('images', exist_ok=True)

    # 1. Label distribution bar chart.
    plt.figure(figsize=(10, 6))
    labels.value_counts().plot(kind='bar')
    plt.title('Label Distribution')
    plt.xlabel('Label')
    plt.ylabel('Count')
    plt.savefig('images/label_distribution.png')
    plt.close()

    # 2. Correlation heatmap over numeric features only (one-hot stock
    # columns are excluded via the 'stock_' prefix).
    numeric_features = [f for f in selected_features if f in features.columns and not f.startswith('stock_')]
    if len(numeric_features) > 1:
        plt.figure(figsize=(16, 12))
        corr_matrix = features[numeric_features].corr()
        sns.heatmap(corr_matrix, annot=False, cmap='coolwarm', center=0)
        plt.title('Feature Correlation Matrix')
        plt.savefig('images/correlation_matrix.png')
        plt.close()

    # 3. Two-component PCA scatter, colored by label.
    if len(numeric_features) >= 2:
        pca = PCA(n_components=2)
        principal_components = pca.fit_transform(features[numeric_features])
        pca_df = pd.DataFrame(data=principal_components, columns=['PC1', 'PC2'])
        # reset_index drops the date index so rows align positionally.
        pca_df['Label'] = labels.reset_index(drop=True)

        plt.figure(figsize=(10, 8))
        sns.scatterplot(x='PC1', y='PC2', hue='Label', data=pca_df, palette='viridis')
        plt.title('PCA of Stock Features')
        plt.savefig('images/pca_visualization.png')
        plt.close()

        # 4. Cumulative explained-variance curve over all components.
        pca_full = PCA().fit(features[numeric_features])
        plt.figure(figsize=(10, 6))
        plt.plot(np.cumsum(pca_full.explained_variance_ratio_))
        plt.xlabel('Number of Components')
        plt.ylabel('Cumulative Explained Variance')
        plt.title('Explained Variance by Different Principal Components')
        plt.savefig('images/pca_explained_variance.png')
        plt.close()

    # 5. Per-stock profile: histograms of up to 6 key indicators for one stock.
    # Discover the one-hot stock-code columns dynamically.
    stock_columns = [col for col in features.columns if col.startswith('stock_')]

    if stock_columns:
        # Use the first one-hot column as the sample stock.
        stock_col = stock_columns[0]
        # Recover the ts_code from the dummy-column name.
        sample_stock = stock_col.replace('stock_', '')

        # Rows belonging to that stock (dummy value == 1).
        stock_mask = features[stock_col] == 1
        if stock_mask.any():
            # Plot at most the first 6 numeric indicators.
            plot_features = numeric_features[:6] if len(numeric_features) >= 6 else numeric_features

            if plot_features:
                plt.figure(figsize=(12, 8))
                for i, col in enumerate(plot_features):
                    plt.subplot(2, 3, i + 1)
                    sns.histplot(features.loc[stock_mask, col], kde=True)
                    plt.title(col)
                plt.tight_layout()
                plt.suptitle(f'Feature Distribution for {sample_stock}', y=1.02)
                plt.savefig('images/stock_profile.png')
                plt.close()
            else:
                print("警告: 没有足够的数值型特征用于个股画像")
        else:
            print(f"警告: 股票 {sample_stock} 在数据中不存在")
    else:
        print("警告: 没有找到股票代码列，跳过个股画像")


# 7. 数据均衡与降维模块
def balance_and_reduce_dimensions(features, labels):
    """Upsample minority classes to balance labels, then PCA-reduce features.

    NOTE(review): upsampling happens BEFORE the train/test split performed
    downstream, so duplicated minority rows can land in both train and test
    sets and inflate evaluation scores — consider splitting first in a
    future revision.

    Args:
        features: feature DataFrame.
        labels: label Series aligned with features.

    Returns:
        tuple (reduced_features, balanced_labels): PCA-transformed ndarray
        retaining 95% of variance, and the balanced label Series.
    """
    # Join features and labels so rows stay aligned during resampling.
    data = pd.concat([features, labels], axis=1)

    class_counts = data['label'].value_counts()
    max_size = class_counts.max()

    # Collect the per-class frames and concatenate once at the end
    # (repeated pd.concat inside the loop is quadratic).
    pieces = []
    for class_val in class_counts.index:
        class_data = data[data['label'] == class_val]
        if len(class_data) < max_size:
            # Upsample with replacement to match the majority class.
            class_data = resample(class_data,
                                  replace=True,
                                  n_samples=max_size,
                                  random_state=42)
        pieces.append(class_data)
    balanced_data = pd.concat(pieces)

    # Split back into features and labels.
    balanced_features = balanced_data.drop('label', axis=1)
    balanced_labels = balanced_data['label']

    # PCA keeping 95% of the explained variance.
    pca = PCA(n_components=0.95)
    reduced_features = pca.fit_transform(balanced_features)

    print(f"\n降维后特征数量: {reduced_features.shape[1]}")
    print(f"解释方差比例: {sum(pca.explained_variance_ratio_):.2f}")

    return reduced_features, balanced_labels


# 8. 建模与评估模块
def train_and_evaluate(features, labels):
    """Train a random-forest classifier and report/plot its performance.

    Prints a classification report to stdout and saves confusion-matrix and
    feature-importance figures under images/.

    Args:
        features: feature matrix (array-like).
        labels: target labels aligned with features.
    """
    # Hold out 30% for testing, stratified to keep class proportions.
    X_train, X_test, y_train, y_test = train_test_split(
        features, labels, test_size=0.3, random_state=42, stratify=labels
    )

    # A moderately regularized forest; class_weight compensates imbalance.
    model = RandomForestClassifier(
        n_estimators=100,
        max_depth=10,
        min_samples_split=5,
        min_samples_leaf=2,
        random_state=42,
        class_weight='balanced'
    )
    model.fit(X_train, y_train)

    predictions = model.predict(X_test)

    # Text report.
    print("\n模型评估结果:")
    print(classification_report(y_test, predictions))

    # Confusion-matrix heatmap.
    plt.figure(figsize=(10, 8))
    matrix = confusion_matrix(y_test, predictions)
    sns.heatmap(matrix, annot=True, fmt='d', cmap='Blues',
                xticklabels=['下跌', '平盘', '上涨'],
                yticklabels=['下跌', '平盘', '上涨'])
    plt.title('Confusion Matrix')
    plt.xlabel('Predicted')
    plt.ylabel('Actual')
    plt.savefig('images/confusion_matrix.png')
    plt.close()

    # Top-20 feature importances, highest first.
    if hasattr(model, 'feature_importances_'):
        importances = model.feature_importances_
        order = np.argsort(importances)[::-1]
        top = min(20, len(importances))

        plt.figure(figsize=(12, 8))
        plt.title('Feature Importances')
        plt.bar(range(top), importances[order][:20], align='center')
        plt.xticks(range(top), order[:20])
        plt.xlim([-1, 20])
        plt.savefig('images/feature_importances.png')
        plt.close()

# 主函数
def main():
    """Run the full pipeline end to end.

    Fetch -> preprocess/indicators -> label -> merge -> clean/engineer ->
    visualize -> balance/reduce -> train/evaluate. Figures are written to
    the images/ directory.
    """
    # Step 1: download raw daily bars.
    print("正在获取股票数据...")
    raw_quotes = get_stock_data()

    # Step 2: compute technical indicators.
    print("\n正在预处理数据并计算技术指标...")
    enriched = preprocess_and_calculate_indicators(raw_quotes)

    # Step 3: attach forward-return labels.
    print("\n正在为数据打标签...")
    labeled = label_data(enriched)

    # Step 4: stack all stocks into one frame.
    print("\n正在合并所有股票数据...")
    merged = merge_and_analyze_data(labeled)

    # Step 5: clean, clip, scale, and one-hot encode.
    print("\n正在进行数据清洗与特征工程...")
    feature_df, targets, feature_names = clean_and_engineer_features(merged)

    # Step 6: exploratory plots.
    print("\n正在进行数据分析与可视化...")
    analyze_and_visualize(feature_df, targets, feature_names)

    # Step 7: class balancing and PCA.
    print("\n正在进行数据均衡与降维...")
    reduced, balanced = balance_and_reduce_dimensions(feature_df, targets)

    # Step 8: model training and evaluation.
    print("\n正在训练模型并评估...")
    train_and_evaluate(reduced, balanced)

    print("\n所有流程完成! 请查看images文件夹中的可视化结果。")


if __name__ == "__main__":
    main()