
# Import tushare.
import tushare as ts

# Initialize the Tushare Pro API client.
# SECURITY NOTE(review): the API token is hard-coded; prefer reading it from
# an environment variable (e.g. os.environ['TUSHARE_TOKEN']) so it is never
# committed to version control.
pro = ts.pro_api('d3bdfac8ffb0ad7ae6c830bed29e28580b51f91ee085a6866c238d84')

# Pull daily bars. The original passed every filter (ts_code, dates, offset,
# limit) as an empty string ""; omitting them entirely expresses the same
# "no filter" intent. `fields` is the canonical comma-separated string that
# the Tushare Pro API documents.
df = pro.daily(fields=(
    "ts_code,trade_date,open,high,low,close,pre_close,"
    "change,pct_chg,vol,amount"
))
print(df)
import tushare as ts
from datetime import datetime, timedelta
import os

# 1. Initialize the Tushare Pro client.
# SECURITY NOTE(review): hard-coded token — prefer os.environ['TUSHARE_TOKEN'].
token = 'd3bdfac8ffb0ad7ae6c830bed29e28580b51f91ee085a6866c238d84'
pro = ts.pro_api(token)

# 2. Date range: the last two years (~730 calendar days), YYYYMMDD strings.
end_date = datetime.today().strftime('%Y%m%d')
start_date = (datetime.today() - timedelta(days=730)).strftime('%Y%m%d')

# 3. The 10 stocks to download.
stock_list = [
    '600519.SH',  # Kweichow Moutai
    '000001.SZ',  # Ping An Bank
    '000333.SZ',  # Midea Group
    '600036.SH',  # China Merchants Bank
    '601318.SH',  # Ping An Insurance
    '000858.SZ',  # Wuliangye
    '600031.SH',  # SANY Heavy Industry
    '002415.SZ',  # Hikvision
    '300750.SZ',  # CATL
    '601166.SH',  # Industrial Bank
]

# 4. Output directory on the current user's Desktop. The original hard-coded
# "C:/Users/Administrator/Desktop", which breaks for any other account; the
# rest of this file already resolves the Desktop via expanduser, so do the
# same here for consistency.
output_dir = os.path.join(os.path.expanduser('~'), 'Desktop', 'a_share_daily')
os.makedirs(output_dir, exist_ok=True)

# 5. Fetch each stock's daily bars and save them as CSV.
for code in stock_list:
    try:
        df = pro.daily(
            ts_code=code,
            start_date=start_date,
            end_date=end_date,
            # Comma-separated field list per the Tushare Pro API convention.
            fields='ts_code,trade_date,open,high,low,close,pre_close,'
                   'change,pct_chg,vol,amount',
        )
        # Guard against a None/empty response (rate limit, bad code, etc.)
        # instead of writing an empty file.
        if df is None or df.empty:
            print(f"下载 {code}：未返回数据，跳过")
            continue
        # Sort chronologically (the API returns newest-first).
        df = df.sort_values('trade_date')
        # Save as CSV.
        file_path = os.path.join(output_dir, f"{code}_daily.csv")
        df.to_csv(file_path, index=False)
        print(f"{code} 数据已保存到 {file_path}")
    except Exception as e:
        print(f"下载 {code} 时出错：{e}")

import pandas as pd
import numpy as np
import pandas_ta as ta
from sklearn.preprocessing import MinMaxScaler

import pandas as pd
import pandas_ta as ta
import os

# 1. Folder paths: read raw dailies from the Desktop, write features beside them.
desktop = os.path.join(os.path.expanduser("~"), "Desktop")
input_dir = os.path.join(desktop, "a_share_daily")
output_dir = os.path.join(desktop, "a_share_features")
os.makedirs(output_dir, exist_ok=True)

# 2. Stock universe (must match the download step).
stock_list = [
    '600519.SH','000001.SZ','000333.SZ','600036.SH','601318.SH',
    '000858.SZ','600031.SH','002415.SZ','300750.SZ','601166.SH'
]

# 3. Per-stock feature engineering.
for code in stock_list:
    # Load and sort chronologically.
    df = pd.read_csv(os.path.join(input_dir, f"{code}_daily.csv"),
                     parse_dates=['trade_date'])
    df.sort_values('trade_date', inplace=True)

    # pandas_ta indicators (notably vwap) require a DatetimeIndex.
    df.set_index('trade_date', inplace=True)
    df.index = pd.DatetimeIndex(df.index)

    # Technical indicators (16 example features).
    df['EMA_5'] = ta.ema(df['close'], length=5)
    df['EMA_10'] = ta.ema(df['close'], length=10)
    df['EMA_20'] = ta.ema(df['close'], length=20)
    df['SMA_20'] = ta.sma(df['close'], length=20)
    df['RSI_14'] = ta.rsi(df['close'], length=14)
    macd = ta.macd(df['close'], fast=12, slow=26, signal=9)
    df['MACD'] = macd['MACD_12_26_9']
    df['MACDh'] = macd['MACDh_12_26_9']
    df['MACDs'] = macd['MACDs_12_26_9']
    kdj = ta.stoch(df['high'], df['low'], df['close'], k=14, d=3)
    df['K'] = kdj['STOCHk_14_3_3']
    df['D'] = kdj['STOCHd_14_3_3']
    bbands = ta.bbands(df['close'], length=20, std=2)
    df['BB_UP'] = bbands['BBU_20_2.0']
    df['BB_MA'] = bbands['BBM_20_2.0']
    df['BB_DN'] = bbands['BBL_20_2.0']
    df['ATR_14'] = ta.atr(df['high'], df['low'], df['close'], length=14)
    df['OBV'] = ta.obv(df['close'], df['vol'])
    df['ADX_14'] = ta.adx(df['high'], df['low'], df['close'], length=14)['ADX_14']
    df['CCI_20'] = ta.cci(df['high'], df['low'], df['close'], length=20)
    df['MOM_10'] = ta.mom(df['close'], length=10)
    df['VWAP'] = ta.vwap(df['high'], df['low'], df['close'], df['vol'])

    # Restore trade_date as a regular column.
    df.reset_index(inplace=True)

    # Missing values: forward-fill, then drop the leading warm-up rows the
    # indicators cannot fill. (df.fillna(method='ffill') is deprecated in
    # pandas 2.x — DataFrame.ffill is the supported spelling.)
    df.ffill(inplace=True)
    df.dropna(inplace=True)

    # Label: 1 if tomorrow's close is higher, else 0. The last row has no
    # "tomorrow" (next_pct is NaN), so it is dropped instead of being
    # silently mislabelled 0 as in the original.
    df['next_pct'] = df['close'].pct_change().shift(-1) * 100
    df = df[df['next_pct'].notna()].copy()
    df['label'] = (df['next_pct'] > 0).astype(int)
    df.drop(columns=['next_pct'], inplace=True)

    # Save.
    out_path = os.path.join(output_dir, f"{code}_features_labels.csv")
    df.to_csv(out_path, index=False, encoding='utf-8-sig')
    print(f"{code} 特征与标签已保存：{out_path}")

import os
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from sklearn.preprocessing import StandardScaler

# Base paths on the user's Desktop.
desktop = os.path.join(os.path.expanduser("~"), "Desktop")
input_dir = os.path.join(desktop, "a_share_features")  # feature/label CSVs
viz_base = os.path.join(desktop, "a_share_viz")        # all visualization output
os.makedirs(viz_base, exist_ok=True)

# The 10-stock universe (must match the earlier steps).
stock_list = [
    '600519.SH','000001.SZ','000333.SZ','600036.SH','601318.SH',
    '000858.SZ','600031.SH','002415.SZ','300750.SZ','601166.SH'
]

for code in stock_list:
    # 1. Per-stock output directory.
    out_dir = os.path.join(viz_base, code)
    os.makedirs(out_dir, exist_ok=True)

    # 2. Load and sort chronologically.
    df = pd.read_csv(os.path.join(input_dir, f"{code}_features_labels.csv"),
                     parse_dates=['trade_date'])
    df.sort_values('trade_date', inplace=True)

    # 3. Time-series trend: Close vs EMA5 vs RSI14.
    # NOTE(review): RSI_14 lives on a 0-100 scale while Close is a price, so
    # the RSI curve is visually flattened on this shared axis — consider a
    # secondary axis (plt.twinx) if the chart is meant for analysis.
    plt.figure()
    plt.plot(df['trade_date'], df['close'], label='Close')
    plt.plot(df['trade_date'], df['EMA_5'], label='EMA_5')
    plt.plot(df['trade_date'], df['RSI_14'], label='RSI_14')
    plt.legend()
    plt.title(f'{code} Close vs EMA5 vs RSI14')
    plt.xlabel('Date')
    plt.ylabel('Value')
    plt.tight_layout()
    plt.savefig(os.path.join(out_dir, 'trend.png'))
    plt.close()

    # 4. Distribution histogram: RSI14.
    plt.figure()
    sns.histplot(df['RSI_14'], bins=30, kde=True)
    plt.title(f'{code} RSI_14 Distribution')
    plt.xlabel('RSI_14')
    plt.ylabel('Density')
    plt.tight_layout()
    plt.savefig(os.path.join(out_dir, 'rsi_hist.png'))
    plt.close()

    # 5. Correlation heatmap over the numeric feature columns only.
    features = df.drop(columns=['trade_date', 'ts_code', 'label']).select_dtypes('number')
    corr = features.corr()
    plt.figure(figsize=(10, 8))
    sns.heatmap(corr, cmap='coolwarm', center=0, square=True)
    plt.title(f'{code} Feature Correlation')
    plt.tight_layout()
    plt.savefig(os.path.join(out_dir, 'corr_heatmap.png'))
    plt.close()

    # 6. PCA projection to 2 components, colored by label.
    scaler = StandardScaler()
    X_std = scaler.fit_transform(features)
    pca = PCA(n_components=2)
    X_pca = pca.fit_transform(X_std)
    var_pct = pca.explained_variance_ratio_.sum() * 100

    plt.figure()
    plt.scatter(X_pca[:, 0], X_pca[:, 1], c=df['label'], s=5, cmap='viridis')
    plt.xlabel('PC1')
    plt.ylabel('PC2')
    plt.title(f'{code} PCA ({var_pct:.1f}% Variance)')
    plt.colorbar(label='Label')
    plt.tight_layout()
    plt.savefig(os.path.join(out_dir, 'pca.png'))
    plt.close()

    # 7. t-SNE embedding. `n_iter` was renamed `max_iter` in scikit-learn 1.5
    # and removed in 1.7; perplexity must also be strictly smaller than the
    # sample count, which is guarded here for short histories.
    tsne = TSNE(n_components=2,
                perplexity=min(30, max(1, len(features) - 1)),
                max_iter=500,
                init='pca')
    X_tsne = tsne.fit_transform(X_std)

    plt.figure()
    plt.scatter(X_tsne[:, 0], X_tsne[:, 1], c=df['label'], s=5, cmap='plasma')
    plt.xlabel('t-SNE1')
    plt.ylabel('t-SNE2')
    plt.title(f'{code} t-SNE')
    plt.colorbar(label='Label')
    plt.tight_layout()
    plt.savefig(os.path.join(out_dir, 'tsne.png'))
    plt.close()

    print(f"{code} 的所有可视化已生成于：{out_dir}")
import pandas as pd
from sklearn.model_selection import train_test_split

# Load features and labels for one stock (300750.SZ, CATL) from the same
# Desktop folder the feature step wrote to. The original read the file from
# the current working directory, which only works if the script happens to
# run inside that folder; its comment also said "300750.SH" for a .SZ code.
desktop = os.path.join(os.path.expanduser("~"), "Desktop")
df = pd.read_csv(
    os.path.join(desktop, "a_share_features", "300750.SZ_features_labels.csv"),
    parse_dates=['trade_date']
)

# Feature matrix: everything except identifiers and the target.
X = df.drop(columns=['trade_date', 'ts_code', 'label'])
y = df['label']  # binary target: 1 = next-day gain

# Chronological train/test split (shuffle=False) so future rows never leak
# into the training set.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, shuffle=False
)

from xgboost import XGBClassifier

# XGBoost binary classifier. The original passed use_label_encoder=False,
# a parameter that was deprecated in xgboost 1.6 and later removed — it is
# dropped here. Hyper-parameters follow the XGBoost documentation defaults
# for a modest tree ensemble.
model = XGBClassifier(
    objective='binary:logistic',
    n_estimators=100,
    learning_rate=0.1,
    max_depth=5,
    subsample=0.8,
    colsample_bytree=0.8,
    eval_metric='logloss',
    random_state=42,
)

# Fit on the chronological training split.
model.fit(X_train, y_train)

from sklearn.metrics import (
    accuracy_score, precision_score, recall_score,
    f1_score, roc_auc_score, confusion_matrix, classification_report
)

# Predictions and positive-class probabilities for the held-out period.
y_pred = model.predict(X_test)
y_proba = model.predict_proba(X_test)[:, 1]

# Classification metrics. zero_division=0 keeps precision/recall/F1 well
# defined (instead of warning) if the model predicts only one class on the
# test window.
acc = accuracy_score(y_test, y_pred)
prec = precision_score(y_test, y_pred, zero_division=0)
rec = recall_score(y_test, y_pred, zero_division=0)
f1 = f1_score(y_test, y_pred, zero_division=0)
auc = roc_auc_score(y_test, y_proba)
cm = confusion_matrix(y_test, y_pred)

print(f"Accuracy : {acc:.4f}")
print(f"Precision: {prec:.4f}")
print(f"Recall : {rec:.4f}")
print(f"F1 Score : {f1:.4f}")
print(f"ROC-AUC : {auc:.4f}")
print("Confusion Matrix:\n", cm)
print("\nClassification Report:\n",
      classification_report(y_test, y_pred, zero_division=0))

from sklearn.model_selection import TimeSeriesSplit, GridSearchCV

# 1) Expanding-window cross-validation splitter that respects time order.
tscv = TimeSeriesSplit(n_splits=5)

# 2) Hyper-parameter search space.
param_grid = {
    'max_depth': [3, 5, 7],
    'learning_rate': [0.01, 0.1],
    'subsample': [0.6, 0.8, 1.0]
}

# 3) Grid search over the time-series folds, optimizing F1.
grid = GridSearchCV(
    estimator=model,
    param_grid=param_grid,
    cv=tscv,
    scoring='f1',
    n_jobs=-1
)

# 4) Tune on the TRAINING portion only. The original called grid.fit(X, y),
# which lets the held-out test period influence hyper-parameter selection
# (look-ahead leakage) and invalidates the earlier test metrics.
grid.fit(X_train, y_train)

print("Best Parameters :", grid.best_params_)
print("Best CV F1 Score:", grid.best_score_)