import os
from copy import deepcopy
from datetime import datetime, timedelta

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import talib
import tushare as ts
from sklearn.decomposition import PCA
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler

# Initialize the tushare pro API client.
# SECURITY(review): the literal below is a credential committed to source
# control. Prefer supplying it through the TUSHARE_TOKEN environment
# variable; the hard-coded value is kept only as a backward-compatible
# fallback and should be rotated.
pro = ts.pro_api(os.getenv('TUSHARE_TOKEN',
                           '1c7f85b9026518588c0d0cdac712c2d17344332c9c8cfe6bc83ee75c'))

# 1. Build the stock universe
def get_stock_list(n=10, min_age_days=365 * 2):
    """Return ts_codes of randomly sampled, seasoned listed stocks.

    Fetches all currently listed ('L') stocks on the Shanghai and Shenzhen
    exchanges, keeps those listed for at least ``min_age_days`` days, and
    randomly samples up to ``n`` of them.

    Parameters:
        n (int): number of stocks to sample (default 10, the previously
            hard-coded value).
        min_age_days (int): minimum listing age in days (default two years).

    Returns:
        list[str]: sampled ts_code identifiers, or every qualifying code
        when fewer than ``n`` stocks qualify.
    """
    df = pro.stock_basic(exchange='', list_status='L',
                         fields='ts_code,symbol,name,area,industry,list_date')
    # list_date is a 'YYYYMMDD' string, so lexicographic comparison is
    # equivalent to chronological comparison.
    cutoff = (datetime.now() - timedelta(days=min_age_days)).strftime('%Y%m%d')
    df = df[df['list_date'] < cutoff]
    if len(df) >= n:
        return df.sample(n)['ts_code'].tolist()
    return df['ts_code'].tolist()

# 2. Download daily bar data
def get_daily_data(stock_list, years=2):
    """Fetch daily OHLCV bars for each stock over the last ``years`` years.

    Downloads are best-effort: a failure for one stock is reported and the
    remaining stocks are still fetched.

    Parameters:
        stock_list (list[str]): ts_code identifiers to download.
        years (int): lookback window in years (default 2, the previously
            hard-coded value).

    Returns:
        pandas.DataFrame: all downloaded rows with a fresh 0..N-1 index
        (the original version concatenated inside the loop — quadratic
        growth — and kept duplicated per-stock indices); an empty
        DataFrame if every download fails or the list is empty.
    """
    end_date = datetime.now().strftime('%Y%m%d')
    start_date = (datetime.now() - timedelta(days=365 * years)).strftime('%Y%m%d')
    frames = []  # collect per-stock frames, concatenate once at the end
    for stock in stock_list:
        try:
            df = pro.daily(ts_code=stock, start_date=start_date, end_date=end_date)
            frames.append(df)
            print(f"已获取 {stock} 的数据，共 {len(df)} 条记录")
        except Exception as e:
            # Best-effort download: report the failure and continue.
            print(f"获取 {stock} 数据失败: {e}")
    if not frames:
        return pd.DataFrame()
    return pd.concat(frames, ignore_index=True)

# 3. KLineProcessor: fractal labeling for candlestick (K-line) series
class KLineProcessor:
    """Label top and bottom fractals on a time series that contains
    `low` and `high` trade-price columns.
    """
    def __init__(self, df):
        """
        Validate the input frame and initialize processing state.

        Parameters:
        df (pandas.DataFrame): raw K-line data; must contain the
            'trade_date', 'high' and 'low' columns (enforced by
            _validate_input).

        NOTE(review): df is stored by reference, so the helper columns
        added later in _preprocess_data mutate the caller's frame in
        place — confirm callers expect that.
        """
        self._validate_input(df)
        self.data = df
        self.kline = None            # frame with Fmark/Fval/line columns added
        self.fractals = None         # fractal records (list of row dicts)
        self.processed_kline = None  # every row as a dict, fractal rows marked
        self.line = None             # stroke endpoints (alternating fractals)

    @staticmethod
    def _validate_input(df: pd.DataFrame):
        """Raise ValueError when any required column is missing."""
        required_cols = {'trade_date', 'high', 'low'}
        if not required_cols.issubset(df.columns):
            missing = required_cols - set(df.columns)
            raise ValueError(f"输入数据缺少必要列：{missing}")

    def _preprocess_data(self):
        """Add the output columns (Fmark, Fval, line) initialized to zero.

        Mutates self.data in place and returns it.
        """
        self.data['Fmark'] = np.zeros(self.data.shape[0], dtype=int)
        self.data['Fval'] = np.zeros(self.data.shape[0], dtype=float)
        self.data['line'] = np.zeros(self.data.shape[0], dtype=float)
        return self.data

    def process_kline_fractals(self):
        """Scan every interior bar and mark top/bottom fractals.

        A bar is a top fractal (Fmark=1, Fval=its high) when its high
        exceeds both neighbors' highs, and a bottom fractal (Fmark=-1,
        Fval=its low) when its low is below both neighbors' lows.

        NOTE(review): to_dict('records') copies each row into a new dict,
        so the Fmark/Fval values written here land only in
        self.processed_kline / self.fractals — the columns in self.data
        keep their zeros. Verify this is intended.
        """
        self.kline = self._preprocess_data()
        kline = self.kline.to_dict('records')
        fractals = []
        for i in range(1, len(kline) - 1):
            # Sliding window of three consecutive bars.
            k1, k2, k3 = kline[i - 1:i + 2]
            if k1['high'] < k2['high'] > k3['high']:
                k2['Fmark'] = 1
                k2['Fval'] = k2['high']
                fractals.append(k2)
            elif k1['low'] > k2['low'] < k3['low']:
                k2['Fmark'] = -1
                k2['Fval'] = k2['low']
                fractals.append(k2)
        self.processed_kline = kline
        self.fractals = fractals

    def getLine(self):
        """Reduce the fractal list to alternating stroke endpoints.

        Consecutive fractals with the same direction are merged, keeping
        the higher top (Fmark=1) or the lower bottom (Fmark=-1), so the
        returned list strictly alternates between tops and bottoms.

        Raises:
            ValueError: if process_kline_fractals has not been run yet.
        """
        if self.fractals is None:
            raise ValueError("Fractals not processed yet. Please run process_kline_fractals first.")
        line = []
        for i in range(len(self.fractals)):
            if i == 0:
                line.append(self.fractals[i])
            else:
                kpre = line[-1]
                kcur = self.fractals[i]
                if kpre['Fmark'] == kcur['Fmark']:
                    # Same direction as the last kept endpoint:
                    # keep whichever of the two is more extreme.
                    if (kpre['Fmark'] == 1 and kpre['Fval'] < kcur['Fval']) or \
                       (kpre['Fmark'] == -1 and kpre['Fval'] > kcur['Fval']):
                        line[-1] = kcur
                else:
                    line.append(kcur)
        return line

    def get_fractals(self):
        """Mark the rows preceding the first stroke endpoint in self.data.

        All rows positionally before the first endpoint's trade_date get
        Fmark=-2 (when that endpoint is a bottom) or Fmark=2 (when it is
        a top). Returns the mutated self.data.

        Raises:
            ValueError: if no stroke endpoints exist or the endpoint's
                trade_date is absent from self.data.
        """
        if self.line is None:
            self.line = self.getLine()
        if len(self.line) == 0:
            raise ValueError("No valid lines found. Cannot proceed with fractals processing.")
        colum_idx = self.data.columns.get_loc('trade_date')  # NOTE(review): unused
        fmark_idx = self.data.columns.get_loc('Fmark')
        first_line_date = self.line[0]['trade_date']
        first_line_fmark = self.line[0]['Fmark']
        # Positional index of the first row whose trade_date matches;
        # iloc below is positional, so a non-unique index is harmless.
        match_indices = np.where(self.data['trade_date'].values == first_line_date)[0]
        if len(match_indices) == 0:
            raise ValueError(f"No matching trade_date found for {first_line_date} in data.")
        first_match_index = match_indices[0]
        if first_line_fmark == -1:
            self.data.iloc[0:first_match_index, fmark_idx] = -2
        else:
            self.data.iloc[0:first_match_index, fmark_idx] = 2
        return self.data

    def get_data(self):
        """Run the full labeling pipeline and return the labeled DataFrame.

        NOTE(review): self.fractals is overwritten here with the DataFrame
        returned by get_fractals(), shadowing the fractal dict list built
        in process_kline_fractals — looks unintentional; verify before
        relying on self.fractals afterwards.
        """
        self.process_kline_fractals()
        self.line = self.getLine()
        self.fractals = self.get_fractals()
        return self.fractals

# 4. Fractal labeling and technical-indicator computation
def prepare_data_with_indicators(stock_data):
    """Label fractals and compute technical indicators, one stock at a time.

    Bug fixed versus the original: indicators and labels were computed on
    the raw concatenated multi-stock frame, which (a) mixed different
    stocks inside rolling windows — MA/MACD/ATR/OBV/... bled across stock
    boundaries — and (b) ran on tushare's newest-first row order, so every
    rolling quantity looked backwards in time and TARGET ("close 3 bars
    ahead") actually looked 3 bars into the past. Rows are now grouped by
    ts_code and sorted by trade_date ascending before any computation.

    Parameters:
        stock_data (pandas.DataFrame): daily bars; must include
            trade_date, high, low, close and vol columns (and ts_code when
            the frame holds several stocks).

    Returns:
        pandas.DataFrame: labeled rows with indicator columns appended.
    """
    def calculate_technical_indicators(df):
        """Append TA-Lib indicators plus PCT_CHG and the 3-bar-ahead TARGET."""
        close = df['close'].values
        high = df['high'].values
        low = df['low'].values
        volume = df['vol'].values

        df['MA5'] = talib.MA(close, timeperiod=5)
        df['MA10'] = talib.MA(close, timeperiod=10)
        df['MA20'] = talib.MA(close, timeperiod=20)
        df['EMA12'] = talib.EMA(close, timeperiod=12)
        df['EMA26'] = talib.EMA(close, timeperiod=26)
        df['MACD'], df['MACDsignal'], df['MACDhist'] = talib.MACD(close)
        df['RSI14'] = talib.RSI(close, timeperiod=14)
        df['RSI7'] = talib.RSI(close, timeperiod=7)
        df['STOCH_K'], df['STOCH_D'] = talib.STOCH(high, low, close)
        df['WILLR'] = talib.WILLR(high, low, close, timeperiod=14)
        df['CCI'] = talib.CCI(high, low, close, timeperiod=14)
        df['ADX'] = talib.ADX(high, low, close, timeperiod=14)
        df['ATR'] = talib.ATR(high, low, close, timeperiod=14)
        df['NATR'] = talib.NATR(high, low, close, timeperiod=14)
        df['OBV'] = talib.OBV(close, volume)
        df['AD'] = talib.AD(high, low, close, volume)
        df['SAR'] = talib.SAR(high, low)
        df['Bollinger_Upper'], df['Bollinger_Middle'], df['Bollinger_Lower'] = talib.BBANDS(close)
        df['PCT_CHG'] = df['close'].pct_change() * 100
        # 1 when the close 3 bars later is above today's close, else 0.
        df['TARGET'] = (df['close'].shift(-3) > df['close']).astype(int)
        return df

    def _process_single_stock(df):
        """Sort one stock chronologically, label fractals, add indicators."""
        # sort_values returns a copy, so the caller's frame is not mutated.
        df = df.sort_values('trade_date').reset_index(drop=True)
        labeled = KLineProcessor(df).get_data()
        return calculate_technical_indicators(labeled)

    if 'ts_code' in stock_data.columns:
        parts = [_process_single_stock(group)
                 for _, group in stock_data.groupby('ts_code', sort=False)]
        return pd.concat(parts, ignore_index=True)
    # Single-stock frame without a ts_code column: process it as one group.
    return _process_single_stock(stock_data)

# 5. Data cleaning, scaling and exploratory analysis
def preprocess_and_analyze(data):
    """Clean, scale and explore the engineered feature set.

    Drops incomplete rows, replaces 3-sigma outliers with the column mean,
    standardizes every feature column, shows a correlation heatmap and a
    six-indicator profile, fits a 95%-variance PCA for information, and
    prints the class balance of TARGET.

    Parameters:
        data (pandas.DataFrame): output of prepare_data_with_indicators.

    Returns:
        tuple: (X, y, df, feature_cols) — feature matrix, TARGET labels,
        the full processed frame, and the feature column names.
    """
    df = data.copy()

    # Rolling-window indicators leave leading NaNs; discard those rows.
    df = df.dropna()

    # 3-sigma rule: any value more than 3 std-devs from the column mean is
    # replaced by the mean. Label columns are left untouched.
    label_cols = {'Fmark', 'TARGET'}
    for col in df.select_dtypes(include=[np.number]).columns.tolist():
        if col in label_cols:
            continue
        mu = df[col].mean()
        sigma = df[col].std()
        df[col] = np.where(np.abs(df[col] - mu) > 3 * sigma, mu, df[col])

    # Standardize everything except identifiers and labels.
    excluded = {'trade_date', 'ts_code', 'Fmark', 'TARGET'}
    feature_cols = [c for c in df.columns if c not in excluded]
    df[feature_cols] = StandardScaler().fit_transform(df[feature_cols])

    # Correlation structure of the scaled features.
    plt.figure(figsize=(20, 15))
    sns.heatmap(df[feature_cols].corr(), annot=False, cmap='coolwarm')
    plt.title('Feature Correlation Matrix')
    plt.show()

    # PCA retaining 95% of the variance (informational only — the
    # projected features are not used downstream).
    pca = PCA(n_components=0.95)
    pca.fit_transform(df[feature_cols])
    print(f"原始特征数: {len(feature_cols)}, 降维后特征数: {pca.n_components_}")

    # Stock profile: six representative indicators over time.
    profile_cols = ['MA20', 'RSI14', 'MACD', 'ATR', 'OBV', 'Bollinger_Upper']
    profile_df = df[['trade_date'] + profile_cols]

    plt.figure(figsize=(15, 10))
    for position, col in enumerate(profile_cols, start=1):
        plt.subplot(3, 2, position)
        plt.plot(profile_df['trade_date'], profile_df[col])
        plt.title(col)
        plt.xticks(rotation=45)
    plt.tight_layout()
    plt.show()

    # Report how balanced the binary target is.
    X = df[feature_cols]
    y = df['TARGET']
    print("类别分布:\n", y.value_counts())

    return X, y, df, feature_cols

# 6. Model training and evaluation
def build_and_evaluate_model(X, y, feature_cols):
    """Train a class-balanced random forest and report test performance.

    Splits the data 70/30, fits a 100-tree RandomForestClassifier with
    class_weight='balanced' to counter label imbalance, then prints the
    classification report and plots the confusion matrix and the sorted
    feature importances.

    Parameters:
        X (pandas.DataFrame): feature matrix.
        y (pandas.Series): binary TARGET labels.
        feature_cols (list[str]): names matching the columns of X.

    Returns:
        RandomForestClassifier: the fitted model.
    """
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.3, random_state=42)

    clf = RandomForestClassifier(
        n_estimators=100,
        random_state=42,
        class_weight='balanced',  # compensate for the skewed TARGET counts
    )
    clf.fit(X_train, y_train)

    predictions = clf.predict(X_test)

    print("分类报告:\n", classification_report(y_test, predictions))

    # Confusion matrix as a heatmap.
    plt.figure(figsize=(8, 6))
    sns.heatmap(confusion_matrix(y_test, predictions),
                annot=True, fmt='d', cmap='Blues')
    plt.title('Confusion Matrix')
    plt.xlabel('Predicted')
    plt.ylabel('Actual')
    plt.show()

    # Feature importances, largest first.
    importances = pd.Series(clf.feature_importances_,
                            index=feature_cols).sort_values(ascending=False)
    plt.figure(figsize=(12, 8))
    sns.barplot(x=importances, y=importances.index)
    plt.title('Feature Importance')
    plt.show()

    return clf

# Script entry point
def main():
    """Run the full pipeline: download, label, analyze, model."""
    # 1. Pick the stock universe.
    stock_list = get_stock_list()
    print("选中的股票列表:", stock_list)

    # 2. Download two years of daily bars and persist them.
    daily_data = get_daily_data(stock_list)
    daily_data.to_csv('stock_daily_data_2years.csv', index=False)
    print("数据已保存到 stock_daily_data_2years.csv")

    # 3. Fractal labeling and technical indicators.
    processed_data = prepare_data_with_indicators(daily_data)
    processed_data.to_csv('processed_stock_data.csv', index=False)

    # 4. Cleaning, scaling and exploratory analysis.
    X, y, full_df, feature_cols = preprocess_and_analyze(processed_data)

    # 5. Train and evaluate the classifier.
    model = build_and_evaluate_model(X, y, feature_cols)


if __name__ == '__main__':
    main()