import tushare as ts
import pandas as pd
from datetime import datetime, timedelta
import time
import matplotlib.pyplot as plt
import seaborn as sns
# Configure matplotlib to render Chinese text.
plt.rcParams['font.sans-serif'] = ['SimHei']  # SimHei supplies CJK glyphs
plt.rcParams['axes.unicode_minus'] = False  # render minus signs correctly with a CJK font

# Initialise the tushare pro API client.
# SECURITY NOTE(review): the API token is hard-coded here; move it to an
# environment variable (e.g. TUSHARE_TOKEN) before committing/sharing.
pro = ts.pro_api('1c7f85b9026518588c0d0cdac712c2d17344332c9c8cfe6bc83ee75c')

# Date window: today back ~2 years (730 days), in tushare's YYYYMMDD format.
end_date = datetime.now().strftime('%Y%m%d')
start_date = (datetime.now() - timedelta(days=730)).strftime('%Y%m%d')

# Sample pool: CSI 300 constituents; keep the first 10 distinct codes.
stock_list = pro.index_weight(index_code='000300.SH', start_date=start_date, end_date=end_date)
selected_stocks = stock_list['con_code'].unique()[:10]

# Fetch daily bars for each selected stock; collect frames and concatenate
# once (pd.concat inside the loop is quadratic in total rows).
frames = []
for stock in selected_stocks:
    print(f"正在获取 {stock} 的数据...")
    df = pro.daily(ts_code=stock, start_date=start_date, end_date=end_date)
    frames.append(df)
    time.sleep(0.5)  # throttle requests to respect the API rate limit
all_data = pd.concat(frames) if frames else pd.DataFrame()

# BUG FIX: the fetched data was never persisted, but the feature-engineering
# stage reads exactly this file.
all_data.to_csv('stock_data_2years_10stocks.csv', index=False)
import numpy as np
import talib

# Load the raw daily bars produced by the fetch stage.
df = pd.read_csv('stock_data_2years_10stocks.csv')

# Group by stock code so indicator windows never mix data across stocks.
grouped = df.groupby('ts_code')

# Accumulate processed groups in a list and concatenate once at the end;
# calling pd.concat inside the loop is quadratic in total row count.
processed_groups = []

for name, group in grouped:
    # Indicator computations assume chronological ordering within a stock.
    group = group.sort_values('trade_date')

    # --- Technical indicators (15 families) ---
    # 1. Simple moving averages
    group['MA5'] = group['close'].rolling(5).mean()
    group['MA10'] = group['close'].rolling(10).mean()
    group['MA20'] = group['close'].rolling(20).mean()
    group['MA60'] = group['close'].rolling(60).mean()

    # 2. Exponential moving averages
    group['EMA12'] = talib.EMA(group['close'], timeperiod=12)
    group['EMA26'] = talib.EMA(group['close'], timeperiod=26)

    # 3. MACD (talib defaults: fast 12, slow 26, signal 9)
    group['MACD'], group['MACDsignal'], group['MACDhist'] = talib.MACD(group['close'])

    # 4. Relative Strength Index at three lookbacks
    group['RSI6'] = talib.RSI(group['close'], timeperiod=6)
    group['RSI12'] = talib.RSI(group['close'], timeperiod=12)
    group['RSI24'] = talib.RSI(group['close'], timeperiod=24)

    # 5. Bollinger Bands
    group['upperband'], group['middleband'], group['lowerband'] = talib.BBANDS(group['close'])

    # 6. Stochastic oscillator (KDJ); J = 3K - 2D
    high = group['high'].values
    low = group['low'].values
    close = group['close'].values

    group['slowk'], group['slowd'] = talib.STOCH(high, low, close)
    group['slowj'] = 3 * group['slowk'] - 2 * group['slowd']

    # 7. On-Balance Volume
    group['OBV'] = talib.OBV(group['close'], group['vol'])

    # 8. Average True Range
    group['ATR14'] = talib.ATR(group['high'], group['low'], group['close'], timeperiod=14)

    # 9. Commodity Channel Index
    group['CCI14'] = talib.CCI(group['high'], group['low'], group['close'], timeperiod=14)

    # 10. Williams %R
    group['WILLR14'] = talib.WILLR(group['high'], group['low'], group['close'], timeperiod=14)

    # 11. Average Directional Index
    group['ADX14'] = talib.ADX(group['high'], group['low'], group['close'], timeperiod=14)

    # 12. Momentum
    group['MOM10'] = talib.MOM(group['close'], timeperiod=10)

    # 13. Rate of Change
    group['ROC10'] = talib.ROC(group['close'], timeperiod=10)

    # 14. Parabolic SAR
    group['SAR'] = talib.SAR(group['high'], group['low'])

    # 15. TRIX triple-smoothed EMA oscillator
    group['TRIX'] = talib.TRIX(group['close'], timeperiod=14)

    # --- Labelling ---
    # Method 1: forward n-day return (basis of the classification target).
    n_days = 5  # label horizon; adjustable
    group['future_pct'] = group['close'].pct_change(n_days).shift(-n_days)

    # Binary label: 1 = up over the next n_days, 0 = down/flat.
    group['label'] = np.where(group['future_pct'] > 0, 1, 0)

    # Method 2: multi-class trend-strength buckets of the forward return.
    group['trend_strength'] = pd.cut(group['future_pct'],
                                     bins=[-np.inf, -0.05, -0.01, 0.01, 0.05, np.inf],
                                     labels=['strong_down', 'weak_down', 'neutral', 'weak_up', 'strong_up'])

    # Method 3: price position relative to the 20-day moving average.
    group['price_position'] = np.where(group['close'] > group['MA20'], 'above_MA20', 'below_MA20')

    processed_groups.append(group)

# Single concatenation of all processed stocks.
result_df = pd.concat(processed_groups)

# Drop rows with NaNs from indicator warm-up periods and the forward-label tail.
result_df = result_df.dropna()

# BUG FIX: persist the engineered dataset; the preprocessing and modelling
# stages read exactly this file, but it was never written.
result_df.to_csv('processed_stock_data_with_indicators_labels.csv', index=False)

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.decomposition import PCA
from sklearn.impute import SimpleImputer
from sklearn.feature_selection import SelectKBest, f_classif

# Load the engineered dataset produced by the feature-engineering stage.
df = pd.read_csv('processed_stock_data_with_indicators_labels.csv')

# Missing-value handling: forward-fill, then back-fill remaining leading NaNs.
# FIX: DataFrame.fillna(method=...) is deprecated (removed in pandas 3.0);
# use the dedicated ffill()/bfill() methods instead.
df.ffill(inplace=True)
df.bfill(inplace=True)


# Outlier handling using the IQR (Tukey fence) rule.
def handle_outliers(df, columns):
    """Winsorize outliers in-place for the given columns of *df*.

    For each float64/int64 column, values outside
    [Q1 - 1.5*IQR, Q3 + 1.5*IQR] are clamped to the nearest bound.
    Non-numeric columns are skipped. Returns the (mutated) DataFrame.
    """
    for col in columns:
        if df[col].dtype in ['float64', 'int64']:
            q1 = df[col].quantile(0.25)
            q3 = df[col].quantile(0.75)
            iqr = q3 - q1
            lower_bound = q1 - 1.5 * iqr
            upper_bound = q3 + 1.5 * iqr

            # clip() replaces the original pair of np.where passes with a
            # single vectorized clamp to [lower_bound, upper_bound].
            df[col] = df[col].clip(lower_bound, upper_bound)
    return df


# Clamp outliers in every numeric column.
numeric_cols = df.select_dtypes(include=['float64', 'int64']).columns.tolist()
# BUG FIX: snapshot the raw values BEFORE winsorizing; previously both
# boxplots below were drawn from the already-clamped frame, so the
# "before"/"after" comparison showed identical data.
df_before = df[['close', 'vol', 'pct_chg']].copy()
df = handle_outliers(df, numeric_cols)

# Visual comparison of the distributions before and after outlier handling.
plt.figure(figsize=(12, 6))
plt.subplot(1, 2, 1)
sns.boxplot(data=df_before)
plt.title('异常值处理前')

plt.subplot(1, 2, 2)
sns.boxplot(data=df[['close', 'vol', 'pct_chg']])
plt.title('异常值处理后')
plt.tight_layout()
plt.show()

# Feature columns to standardize: all numerics except the label columns.
features = df.select_dtypes(include=['float64', 'int64']).columns.tolist()
features.remove('label')  # drop the classification target
if 'trend_strength' in features: features.remove('trend_strength')

# BUG FIX: keep a copy of the raw feature values before overwriting df,
# otherwise the "before standardization" plot below is drawn from the
# already-standardized data.
df_raw_features = df[features].copy()

# Z-score standardization (zero mean, unit variance per feature).
scaler = StandardScaler()
df_scaled = pd.DataFrame(scaler.fit_transform(df[features]), columns=features)

# Write the standardized values back into the working frame.
df[features] = df_scaled

# Compare distributions of the SAME 5 sampled features before/after
# (random_state pinned so both panels show the same columns).
plt.figure(figsize=(12, 6))
plt.subplot(1, 2, 1)
sns.kdeplot(data=df_raw_features.sample(5, axis=1, random_state=0), legend=True)
plt.title('标准化前数据分布')

plt.subplot(1, 2, 2)
sns.kdeplot(data=df_scaled.sample(5, axis=1, random_state=0), legend=True)
plt.title('标准化后数据分布')
plt.tight_layout()
plt.show()

# Pairwise Pearson correlations of all candidate features.
corr_matrix = df[features].corr()

# Heatmap of the full correlation structure.
plt.figure(figsize=(16, 12))
sns.heatmap(corr_matrix, cmap='coolwarm', center=0, annot=False)
plt.title('特征相关性热力图')
plt.show()



# For every pair with |r| above the threshold, mark the later column for
# removal (the earlier column of the pair is kept).
threshold = 0.8
high_corr_features = set()
cols = corr_matrix.columns
for i, col_name in enumerate(cols):
    if any(abs(corr_matrix.iloc[i, j]) > threshold for j in range(i)):
        high_corr_features.add(col_name)

selected_features = [f for f in features if f not in high_corr_features]
print(f"筛选后保留的特征数量: {len(selected_features)}/{len(features)}")

# Heatmap restricted to the retained (low-correlation) features.
plt.figure(figsize=(12, 10))
sns.heatmap(df[selected_features].corr(), cmap='coolwarm', center=0, annot=False)
plt.title('筛选后特征相关性热力图')
plt.show()

# Dimensionality reduction: keep enough principal components for 95% variance.
pca = PCA(n_components=0.95)
pca_result = pca.fit_transform(df[selected_features])

# Cumulative and per-component explained variance.
cum_var = np.cumsum(pca.explained_variance_ratio_)
plt.figure(figsize=(12, 6))
plt.subplot(1, 2, 1)
plt.plot(cum_var)
plt.xlabel('主成分数量')
plt.ylabel('累计解释方差比例')
plt.title('PCA累计解释方差')

plt.subplot(1, 2, 2)
plt.bar(range(pca.n_components_), pca.explained_variance_ratio_)
plt.xlabel('主成分')
plt.ylabel('解释方差比例')
plt.title('各主成分解释方差')
plt.tight_layout()
plt.show()

print(f"原始特征维度: {len(selected_features)}")
print(f"PCA降维后维度: {pca.n_components_}")
print(f"累计解释方差比例: {cum_var[-1]:.2f}")

# Wrap the component scores in a DataFrame, carrying code and label along.
df_pca = pd.DataFrame(pca_result,
                      columns=[f'PC{i+1}' for i in range(pca.n_components_)])
df_pca['ts_code'] = df['ts_code'].values
df_pca['label'] = df['label'].values

# Pick up to six indicator columns that actually exist in the frame
# to build each stock's profile.
candidate_features = ['close', 'vol', 'MA20', 'RSI6', 'RSI12', 'RSI24', 'MACD', 'OBV']
available_features = df.columns.tolist()
profile_features = [f for f in candidate_features if f in available_features][:6]

print("将使用的画像特征:", profile_features)

# Per-stock mean of each profile feature.
stock_profiles = df.groupby('ts_code')[profile_features].mean()

# Rescale every feature to [0, 1] so the radar-chart axes are comparable.
scaler_profile = MinMaxScaler()
stock_profiles_scaled = pd.DataFrame(scaler_profile.fit_transform(stock_profiles),
                                     columns=profile_features,
                                     index=stock_profiles.index)


# Radar-chart visualisation of one stock's profile.
def plot_stock_profile(stock_code):
    """Draw a polar (radar) chart of *stock_code*'s scaled profile features."""
    plt.figure(figsize=(8, 6))
    axis_angles = np.linspace(0, 2 * np.pi, len(profile_features), endpoint=False)
    axis_values = stock_profiles_scaled.loc[stock_code].values
    # Close the polygon by appending the first point at the end.
    closed_angles = np.concatenate((axis_angles, axis_angles[:1]))
    closed_values = np.concatenate((axis_values, axis_values[:1]))

    plt.polar(closed_angles, closed_values, marker='o')
    plt.fill(closed_angles, closed_values, alpha=0.25)
    plt.thetagrids(closed_angles[:-1] * 180 / np.pi, profile_features)
    plt.title(f'股票{stock_code}画像', y=1.1)
    plt.show()


# Randomly pick 3 stocks and render their profile radar charts.
# BUG FIX: the comment promised 3 sample stocks but the code drew 10 without
# replacement, which raises ValueError whenever fewer than 10 stocks exist.
n_samples = min(3, len(stock_profiles.index))
sample_stocks = np.random.choice(stock_profiles.index, n_samples, replace=False)
for stock in sample_stocks:
    plot_stock_profile(stock)


# Inspect the class balance of the binary label.
label_counts = df['label'].value_counts()
print("标签分布:\n", label_counts)

plt.figure(figsize=(8, 5))
sns.barplot(x=label_counts.index, y=label_counts.values)
plt.title('标签分布情况')
plt.xlabel('标签')
plt.ylabel('数量')
plt.show()

from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, confusion_matrix, roc_curve, auc
import matplotlib.pyplot as plt

# Load the engineered dataset for modelling.
df = pd.read_csv('processed_stock_data_with_indicators_labels.csv')

# Feature matrix: numeric columns minus all label-related columns.
features = df.select_dtypes(include=['float64', 'int64']).columns.tolist()
features.remove('label')  # drop the classification target
if 'trend_strength' in features: features.remove('trend_strength')
# BUG FIX: 'future_pct' is the forward return the label is derived from
# (label = future_pct > 0); leaving it in the features leaks the target
# and inflates every evaluation metric to near-perfect values.
if 'future_pct' in features: features.remove('future_pct')

X = df[features]
y = df['label']

# Hold out 20% for testing.
# NOTE(review): a random shuffle split leaks future information on
# time-series data; a chronological split would give a more honest estimate.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Logistic-regression baseline; raise max_iter so the solver can converge
# on unscaled features instead of stopping at the default 100 iterations.
model = LogisticRegression(max_iter=1000)
model.fit(X_train, y_train)

# Predict on the held-out split.
y_pred = model.predict(X_test)

# Standard binary-classification metrics.
accuracy = accuracy_score(y_test, y_pred)
precision = precision_score(y_test, y_pred)
recall = recall_score(y_test, y_pred)
f1 = f1_score(y_test, y_pred)

print(f"准确率: {accuracy:.2f}")
print(f"精确率: {precision:.2f}")
print(f"召回率: {recall:.2f}")
print(f"F1值: {f1:.2f}")

# Confusion-matrix heatmap of the test-set predictions.
cm = confusion_matrix(y_test, y_pred)
plt.figure(figsize=(8, 6))
sns.heatmap(cm, annot=True, fmt='d', cmap='YlGnBu')
plt.title('混淆矩阵')
plt.xlabel('预测值')
plt.ylabel('真实值')
plt.show()

# ROC curve and AUC from the positive-class probabilities.
y_pred_proba = model.predict_proba(X_test)[:, 1]
fpr, tpr, _ = roc_curve(y_test, y_pred_proba)
roc_auc = auc(fpr, tpr)

plt.figure(figsize=(8, 6))
plt.plot(fpr, tpr, color='darkorange', lw=2,
         label='ROC curve (area = %0.2f)' % roc_auc)
# Diagonal = random-guess baseline.
plt.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('假正率')
plt.ylabel('真正率')
plt.title('Receiver Operating Characteristic')
plt.legend(loc="lower right")
plt.show()