import os

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import tushare as ts
from imblearn.over_sampling import SMOTE
from sklearn.decomposition import PCA
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report, confusion_matrix, roc_curve, auc
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from talib import abstract

# 1. Data acquisition
def fetch_stock_data():
    """Download two years of daily OHLCV bars for a fixed list of BSE stocks.

    Returns:
        pd.DataFrame: all stocks concatenated into one frame; each stock's
        rows are sorted ascending by ``trade_date`` (parsed to datetime).
    """
    # SECURITY: prefer the TUSHARE_TOKEN environment variable; the literal
    # is kept only as a backward-compatible fallback for existing setups.
    # This credential was committed to source control and should be rotated.
    token = os.getenv(
        'TUSHARE_TOKEN',
        '1c7f85b9026518588c0d0cdac712c2d17344332c9c8cfe6bc83ee75c',
    )
    pro = ts.pro_api(token)
    stock_list = ['920819.BJ', '920799.BJ', '920682.BJ', '920489.BJ', '920445.BJ',
                  '920167.BJ', '920128.BJ', '920118.BJ', '920116.BJ', '920111.BJ']

    end_date = pd.to_datetime('today').strftime('%Y%m%d')
    start_date = (pd.to_datetime('today') - pd.DateOffset(years=2)).strftime('%Y%m%d')

    # Collect per-stock frames and concatenate once: repeatedly calling
    # pd.concat inside the loop re-copies the accumulated data each pass.
    frames = []
    for stock in stock_list:
        df = pro.daily(ts_code=stock, start_date=start_date, end_date=end_date)
        df['trade_date'] = pd.to_datetime(df['trade_date'])
        frames.append(df.sort_values('trade_date'))

    return pd.concat(frames, ignore_index=True) if frames else pd.DataFrame()

# 2. Feature engineering and labels
def feature_engineering(all_data):
    """Per stock, append TA-Lib technical indicators and a 3-class label.

    Label semantics: +1 if the 5-day forward return exceeds +2%, -1 if it
    is below -2%, 0 otherwise. Rows near each series' end get NaN forward
    returns (shifted past the data) and are expected to be dropped
    downstream.
    """
    # Compute the indicator columns for one stock's daily bars.
    def calculate_technical_indicators(df):
        df = df.sort_values('trade_date')
        # TA-Lib's abstract API accepts a dict of numpy arrays keyed by the
        # conventional OHLCV names; tushare stores volume under 'vol'.
        ohlc = {
            'open': df['open'].values,
            'high': df['high'].values,
            'low': df['low'].values,
            'close': df['close'].values,
            'volume': df['vol'].values
        }

        # Trend / momentum / volatility / volume indicators; TA-Lib default
        # periods apply where no timeperiod is given.
        df['MA5'] = abstract.SMA(ohlc, timeperiod=5)
        df['MA10'] = abstract.SMA(ohlc, timeperiod=10)
        df['MA20'] = abstract.SMA(ohlc, timeperiod=20)
        df['RSI'] = abstract.RSI(ohlc, timeperiod=14)
        df['MACD'], df['MACDsignal'], df['MACDhist'] = abstract.MACD(ohlc)
        df['BB_upper'], df['BB_middle'], df['BB_lower'] = abstract.BBANDS(ohlc)
        df['ADX'] = abstract.ADX(ohlc)
        df['OBV'] = abstract.OBV(ohlc)
        df['CCI'] = abstract.CCI(ohlc)
        df['STOCH_k'], df['STOCH_d'] = abstract.STOCH(ohlc)
        df['ATR'] = abstract.ATR(ohlc)
        df['WILLR'] = abstract.WILLR(ohlc)
        df['ULTOSC'] = abstract.ULTOSC(ohlc)
        df['ROC'] = abstract.ROC(ohlc)
        df['MOM'] = abstract.MOM(ohlc)
        return df

    # NOTE(review): groupby().apply keeps 'ts_code' both as a column and in
    # the resulting index (recent pandas versions also warn about this);
    # downstream code selects by the column, so the duplication appears
    # harmless here — confirm when upgrading pandas.
    all_data = all_data.groupby('ts_code').apply(calculate_technical_indicators)

    # Attach the 5-day forward return and the derived class label.
    # pct_change(5) at row t+5 equals close[t+5]/close[t] - 1; shift(-5)
    # moves that value back to row t, i.e. the return looking forward.
    def label_data(group):
        group['future_5d_return'] = group['close'].pct_change(5).shift(-5)
        group['label'] = np.where(group['future_5d_return'] > 0.02, 1,
                                 np.where(group['future_5d_return'] < -0.02, -1, 0))
        return group

    all_data = all_data.groupby('ts_code').apply(label_data)
    return all_data

# 3. Data preprocessing and analysis
def data_preprocessing_analysis(all_data):
    """Clean, scale, analyze and rebalance the engineered dataset.

    Steps: drop NaN rows (indicator warm-up and label tail), winsorize
    numeric columns to the 1.5*IQR Tukey fences, min-max scale the feature
    columns, show a PCA explained-variance curve, a correlation heatmap and
    per-stock profile plots, then oversample minority classes with SMOTE.

    Returns:
        tuple: (X_resampled, y_resampled, all_data_scaled) where X/y are
        the SMOTE-balanced feature matrix and labels.
    """
    # Indicator warm-up periods and the shifted forward return leave NaNs.
    all_data = all_data.dropna()

    # Clamp each listed column to its Tukey fences (q1/q3 ± 1.5*IQR).
    def handle_outliers(df, columns):
        for col in columns:
            q1 = df[col].quantile(0.25)
            q3 = df[col].quantile(0.75)
            iqr = q3 - q1
            lower_bound = q1 - 1.5 * iqr
            upper_bound = q3 + 1.5 * iqr
            # Series.clip is the idiomatic, equivalent form of the original
            # nested np.where clamp.
            df[col] = df[col].clip(lower_bound, upper_bound)
        return df

    numeric_cols = all_data.select_dtypes(include=[np.number]).columns.tolist()
    numeric_cols.remove('label')  # never clip or scale the class label
    all_data = handle_outliers(all_data, numeric_cols)

    # Min-max scale the features; the target-derived future_5d_return is
    # excluded from the feature matrix (it would leak the label).
    scaler = MinMaxScaler()
    features = all_data[numeric_cols].drop(['future_5d_return'], axis=1, errors='ignore')
    scaled_features = scaler.fit_transform(features)
    scaled_df = pd.DataFrame(scaled_features, columns=features.columns, index=features.index)

    all_data_scaled = all_data.copy()
    all_data_scaled[features.columns] = scaled_df

    # PCA: how many components are needed for 95% explained variance.
    pca = PCA(n_components=0.95)
    pca_features = pca.fit_transform(scaled_features)

    print(f"原始特征数量: {scaled_features.shape[1]}")
    print(f"PCA后特征数量: {pca_features.shape[1]}")

    # Cumulative explained-variance curve.
    plt.figure(figsize=(10, 6))
    plt.plot(np.cumsum(pca.explained_variance_ratio_))
    plt.xlabel('Number of Components')
    plt.ylabel('Cumulative Explained Variance')
    plt.title('PCA Explained Variance')
    plt.show()

    # Pairwise feature correlation heatmap.
    corr_matrix = scaled_df.corr()
    plt.figure(figsize=(16, 12))
    sns.heatmap(corr_matrix, annot=False, cmap='coolwarm', center=0)
    plt.title('Feature Correlation Matrix')
    plt.show()

    # Per-stock profiles for a few representative raw/indicator features.
    profile_features = ['close', 'vol', 'RSI', 'MACD', 'OBV', 'CCI']

    def plot_stock_profile(ts_code):
        stock_data = all_data_scaled[all_data_scaled['ts_code'] == ts_code]
        if len(stock_data) == 0:
            return

        plt.figure(figsize=(12, 8))
        for i, feature in enumerate(profile_features, 1):
            plt.subplot(3, 2, i)
            plt.plot(stock_data['trade_date'], stock_data[feature])
            plt.title(f'{feature} - {ts_code}')
            plt.xticks(rotation=45)
        plt.tight_layout()
        plt.show()

    for stock in all_data['ts_code'].unique()[:3]:
        plot_stock_profile(stock)

    # Class rebalancing.
    # NOTE(review): SMOTE is applied to the FULL dataset before the
    # train/test split, so synthetic test-set samples are interpolated from
    # training rows — downstream evaluation metrics will be optimistic.
    # Also, this is time-series data: resampling only the training fold of
    # a chronological split would be the sound procedure. Left as-is to
    # preserve the pipeline's current behavior.
    X = scaled_features
    y = all_data['label']

    smote = SMOTE(random_state=42)
    X_resampled, y_resampled = smote.fit_resample(X, y)

    balanced_dist = pd.Series(y_resampled).value_counts(normalize=True)
    print("均衡后标签分布:\n", balanced_dist)

    plt.figure(figsize=(8, 5))
    balanced_dist.plot(kind='bar')
    plt.title('Balanced Label Distribution')
    plt.xlabel('Label')
    plt.ylabel('Percentage')
    plt.show()

    return X_resampled, y_resampled, all_data_scaled

# 4. Machine-learning modelling and evaluation
def model_building_and_evaluation(X, y):
    """Train a logistic-regression classifier and report its quality.

    Prints a classification report, draws the confusion-matrix heatmap and
    a one-vs-rest ROC curve per class.

    Returns:
        LogisticRegression: the fitted model.
    """
    # Hold out 20% for testing. Stratify so each class keeps its share in
    # both folds — cheap insurance even though SMOTE balanced the labels.
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.2, random_state=42, stratify=y)

    # lbfgs needs a higher iteration cap than the default to converge.
    model = LogisticRegression(max_iter=1000)
    model.fit(X_train, y_train)

    y_pred = model.predict(X_test)

    report = classification_report(y_test, y_pred)
    print("分类报告:\n", report)

    # Confusion-matrix heatmap.
    cm = confusion_matrix(y_test, y_pred)
    plt.figure(figsize=(8, 6))
    sns.heatmap(cm, annot=True, fmt='d', cmap='Blues')
    plt.title('Confusion Matrix')
    plt.xlabel('Predicted Label')
    plt.ylabel('True Label')
    plt.show()

    # One-vs-rest ROC curve per class. np.unique returns classes sorted,
    # matching the column order of predict_proba (model.classes_).
    # Open a fresh figure explicitly so the curves never land on a stale
    # figure left over from the heatmap above.
    y_probs = model.predict_proba(X_test)
    classes = np.unique(y)
    plt.figure(figsize=(8, 6))
    for i, cls in enumerate(classes):
        fpr, tpr, thresholds = roc_curve(y_test, y_probs[:, i], pos_label=cls)
        roc_auc = auc(fpr, tpr)
        plt.plot(fpr, tpr, label=f'Class {cls} (AUC = {roc_auc:.2f})')

    plt.plot([0, 1], [0, 1], 'k--')
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('ROC Curve')
    plt.legend()
    plt.show()

    return model

# Main pipeline
def main():
    """Run the end-to-end pipeline: fetch -> features -> preprocess -> model.

    Returns (X, y, processed_data, model) for interactive inspection.
    """
    print("1. 获取股票数据...")
    raw = fetch_stock_data()

    print("\n2. 特征工程与标签...")
    enriched = feature_engineering(raw)

    print("\n3. 数据预处理与分析...")
    X, y, processed_data = data_preprocessing_analysis(enriched)

    print("\n4. 机器学习建模与评价...")
    fitted = model_building_and_evaluation(X, y)

    print("\n数据处理和建模完成!")
    return X, y, processed_data, fitted

if __name__ == "__main__":
    # Script entry point: run the whole pipeline and keep the results in
    # module globals for interactive (REPL/notebook) inspection.
    X, y, processed_data, model = main()
    