import tushare as ts
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import time
from datetime import datetime, timedelta
# Use the SimHei font so Chinese labels render in matplotlib figures
# (requires SimHei to be installed on the system).
plt.rcParams['font.sans-serif'] = ['SimHei']
# Render minus signs correctly while a CJK font is active.
plt.rcParams['axes.unicode_minus'] = False
# Configure the Tushare Pro API client.
# SECURITY NOTE(review): the API token is hard-coded in source; move it to an
# environment variable or config file before committing/sharing this script.
ts.set_token('1c7f85b9026518588c0d0cdac712c2d17344332c9c8cfe6bc83ee75c')
pro = ts.pro_api()

# 1. Data acquisition ------------------------------------------------------
# Universe: 10 large-cap A-share stocks (.SH = Shanghai, .SZ = Shenzhen).
stocks = ['600519.SH', '600036.SH', '601318.SH', '600887.SH', '600276.SH',
          '000858.SZ', '000333.SZ', '002024.SZ', '601012.SH', '600703.SH']

# Daily bars covering the trailing ~2 years (730 calendar days).
end_date = datetime.now().strftime('%Y%m%d')
start_date = (datetime.now() - timedelta(days=730)).strftime('%Y%m%d')

df_list = []
for stock in stocks:
    try:
        data = pro.daily(ts_code=stock, start_date=start_date, end_date=end_date)
        # Skip empty/None responses (e.g. suspended or delisted codes) so
        # they do not end up as useless rows in the concatenated frame.
        if data is not None and not data.empty:
            data['ts_code'] = stock
            df_list.append(data)
        time.sleep(1)  # throttle: Tushare rate-limits free-tier tokens
    except Exception as e:
        print(f"获取{stock}数据失败: {e}")

# Fail fast with a clear message instead of letting pd.concat raise a
# cryptic "No objects to concatenate" when every request failed.
if not df_list:
    raise RuntimeError("No stock data could be fetched; check token/network.")

all_data = pd.concat(df_list, ignore_index=True)
all_data['trade_date'] = pd.to_datetime(all_data['trade_date'])
# MultiIndex (trade_date, ts_code), sorted, so all later slicing is by date.
all_data.set_index(['trade_date', 'ts_code'], inplace=True)
all_data.sort_index(inplace=True)
print(f"数据获取完成，共{len(all_data)}条记录")

# 2. Labeling and technical-indicator feature engineering (stdlib/pandas only)
# Work on an explicit copy so later column assignments do not hit pandas'
# SettingWithCopyWarning on a slice of all_data.
data = all_data[['open', 'high', 'low', 'close', 'vol']].dropna().copy()

# Fix: the frame is indexed by (trade_date, ts_code) and sorted by date, so
# consecutive rows belong to DIFFERENT stocks. A plain pct_change() would
# compute "returns" across two different companies; compute per stock.
data['return'] = data.groupby(level='ts_code')['close'].pct_change()

# Three-way label on the daily return: down (< -2%), flat, up (> +2%).
data['label'] = pd.cut(data['return'],
                       bins=[-np.inf, -0.02, 0.02, np.inf],
                       labels=['下跌', '震荡', '上涨'])
# Encode as integers 0/1/2 for modeling.
data['label'] = data['label'].map({'下跌': 0, '震荡': 1, '上涨': 2})


# Hand-rolled technical-indicator helpers (no TA-Lib dependency)
def moving_average(x, window):
    """Return the simple moving average of *x* over *window* periods."""
    rolled = x.rolling(window)
    return rolled.mean()


def exponential_moving_average(x, window):
    """Exponential moving average with span == *window*.

    ``adjust=False`` uses the recursive definition, matching the EMA shown
    on conventional trading platforms.
    """
    smoothed = x.ewm(span=window, adjust=False)
    return smoothed.mean()


def rsi(close, window=14):
    """Relative Strength Index (0-100) using simple rolling means.

    Note: uses plain rolling averages of gains/losses rather than Wilder's
    smoothing; ``where(..., 0)`` (not clip) keeps the first diff-NaN as 0.
    """
    change = close.diff()
    ups = change.where(change > 0, 0)
    downs = -change.where(change < 0, 0)
    mean_up = ups.rolling(window).mean()
    mean_down = downs.rolling(window).mean()
    strength = mean_up / mean_down
    return 100 - (100 / (1 + strength))


def bollinger_bands(close, window=20, std=2):
    """Return (upper, lower) Bollinger bands: rolling mean ± std * sigma."""
    window_stats = close.rolling(window)
    center = window_stats.mean()
    half_width = window_stats.std() * std
    return center + half_width, center - half_width


def macd(close, fast=12, slow=26, signal=9):
    """MACD line, signal line and histogram from two EMAs of *close*.

    EMAs are computed inline with span/adjust=False, identical to the
    exponential_moving_average helper's definition.
    """
    ema_fast = close.ewm(span=fast, adjust=False).mean()
    ema_slow = close.ewm(span=slow, adjust=False).mean()
    macd_line = ema_fast - ema_slow
    signal_line = macd_line.ewm(span=signal, adjust=False).mean()
    return macd_line, signal_line, macd_line - signal_line


def stochastic_oscillator(high, low, close, window=14, k=3, d=3):
    """Slow stochastic oscillator.

    Returns (%K, %D): %K is the raw stochastic smoothed over *k* periods,
    %D is a *d*-period moving average of the smoothed %K.

    Fix: the original computed %D from the RAW stochastic, which made %K
    and %D identical series whenever k == d (the defaults), so the classic
    %K/%D crossover carried no information.
    """
    lowest_low = low.rolling(window).min()
    highest_high = high.rolling(window).max()
    raw_k = 100 * ((close - lowest_low) / (highest_high - lowest_low))
    k_smoothed = raw_k.rolling(k).mean()
    d_line = k_smoothed.rolling(d).mean()
    return k_smoothed, d_line


def atr(high, low, close, window=14):
    """Average True Range: rolling mean of the per-bar true range."""
    prev_close = close.shift()
    ranges = pd.concat(
        [high - low, (high - prev_close).abs(), (low - prev_close).abs()],
        axis=1,
    )
    # Row-wise max skips NaN, so the first bar (no previous close) still
    # contributes its high-low range, exactly like the original.
    true_range = ranges.max(axis=1)
    return true_range.rolling(window).mean()


def vwap(high, low, close, volume):
    """Cumulative volume-weighted average price over the whole series."""
    price = (high + low + close) / 3
    cumulative_value = (price * volume).cumsum()
    cumulative_volume = volume.cumsum()
    return cumulative_value / cumulative_volume


# Compute technical indicators.
# Fix: the frame is indexed by (trade_date, ts_code) and sorted by date, so
# consecutive rows belong to DIFFERENT stocks. All rolling/EWM/cumulative
# statistics must therefore be computed within each ts_code group; the
# original computed them over the interleaved rows, mixing several
# companies' prices inside every window.
def _add_indicators(group):
    """Return *group* (one stock's OHLCV rows) with indicator columns added."""
    group = group.copy()
    group['ma5'] = moving_average(group['close'], 5)
    group['ma20'] = moving_average(group['close'], 20)
    group['ema50'] = exponential_moving_average(group['close'], 50)
    group['macd'], group['macd_signal'], _ = macd(group['close'])
    group['rsi14'] = rsi(group['close'])
    group['stoch_k'], group['stoch_d'] = stochastic_oscillator(group['high'], group['low'], group['close'])
    group['bb_high'], group['bb_low'] = bollinger_bands(group['close'])
    group['atr14'] = atr(group['high'], group['low'], group['close'])
    group['vwap'] = vwap(group['high'], group['low'], group['close'], group['vol'])
    group['vol_ma5'] = moving_average(group['vol'], 5)
    return group


data = data.groupby(level='ts_code', group_keys=False).apply(_add_indicators)

# Row-wise ratios are independent of neighboring rows; no grouping needed.
data['close_open_ratio'] = data['close'] / data['open']
data['high_low_range'] = (data['high'] - data['low']) / data['open']

features = data[['open', 'high', 'low', 'close', 'vol', 'ma5', 'ma20', 'ema50', 'macd', 'macd_signal',
                 'rsi14', 'stoch_k', 'stoch_d', 'bb_high', 'bb_low', 'atr14', 'vwap', 'vol_ma5',
                 'close_open_ratio', 'high_low_range', 'label']].dropna()
print(f"指标计算完成，剩余{len(features)}条有效数据")

# 3. Pre-modeling processing and analysis
# 3.1 Missing-value and outlier handling
print("空值数量：\n", features.isnull().sum())

# Build the numeric frame used for z-score outlier detection, excluding
# 'label' (the target) and 'vol' (raw volume) when present. The dtype check
# matters because pd.cut + map can leave 'label' as a categorical column,
# which select_dtypes would exclude on its own.
if 'label' in features.columns and pd.api.types.is_numeric_dtype(features['label']):
    numeric_features = features.select_dtypes(include=[np.number])
    # Drop the target column if it survived the numeric filter.
    if 'label' in numeric_features.columns:
        numeric_features = numeric_features.drop(columns=['label'])
    # Likewise exclude raw volume from outlier screening.
    if 'vol' in numeric_features.columns:
        numeric_features = numeric_features.drop(columns=['vol'])
else:
    # A non-numeric label is dropped automatically by select_dtypes;
    # only 'vol' needs an explicit drop here.
    numeric_features = features.select_dtypes(include=[np.number]).drop(columns=['vol'])

# Flag cells more than 3 standard deviations from their column mean.
z_scores = np.abs((numeric_features - numeric_features.mean()) / numeric_features.std())
outliers = (z_scores > 3).any(axis=1)
print(f"异常值数量：{outliers.sum()}")

# Replace each flagged cell with its column median (median winsorization).
for col in numeric_features.columns:
    features[col] = np.where(z_scores[col] > 3, numeric_features[col].median(), features[col])

# 3.2 Normalization (no sklearn dependency)
X = features.drop(columns=['label'])  # feature matrix
y = features['label']                 # 0/1/2 class target


def custom_min_max_scaler(X):
    """Scale each column of *X* into [0, 1].

    The small epsilon keeps the division finite for constant columns.
    """
    column_min = np.min(X, axis=0)
    column_span = np.max(X, axis=0) - column_min
    return (X - column_min) / (column_span + 1e-10)


# Scale the raw values, then restore the original column names and index.
X_normalized = custom_min_max_scaler(X.values)
X_normalized = pd.DataFrame(X_normalized, columns=X.columns, index=X.index)


# 3.3 Correlation analysis and visualization
def custom_correlation_matrix(X):
    """Pearson correlation matrix of the columns of *X* (rows = samples)."""
    centered = X - np.mean(X, axis=0)
    covariance = np.dot(centered.T, centered) / (X.shape[0] - 1)
    sigma = np.sqrt(np.diag(covariance))
    return covariance / np.outer(sigma, sigma)


# Correlation heatmap of the normalized features.
corr_matrix = custom_correlation_matrix(X_normalized.values)
corr_df = pd.DataFrame(corr_matrix, columns=X_normalized.columns, index=X_normalized.columns)

plt.figure(figsize=(12, 8))
sns.heatmap(corr_df, annot=True, cmap='coolwarm', vmin=-1, vmax=1)
plt.title('特征相关性热力图')
plt.show()


# 3.4 Principal Component Analysis (PCA) and dimensionality reduction
def custom_pca(X, n_components=0.95):
    """PCA via eigen-decomposition of the covariance matrix.

    Parameters
    ----------
    X : ndarray of shape (n_samples, n_features)
    n_components : float in (0, 1) keeps enough components to reach that
        cumulative explained-variance ratio; an int keeps that many
        components directly.

    Returns
    -------
    (X_pca, eigenvalues, explained_variance_ratio) for the kept components.

    Fix: the original assigned ``explained_variance`` only inside the
    float branch, so calling with an integer ``n_components`` raised
    NameError at the return statement.
    """
    X_centered = X - np.mean(X, axis=0)
    cov_matrix = np.cov(X_centered, rowvar=False)
    # eigh returns eigenvalues in ascending order; sort descending.
    eigenvalues, eigenvectors = np.linalg.eigh(cov_matrix)
    order = np.argsort(eigenvalues)[::-1]
    eigenvalues = eigenvalues[order]
    eigenvectors = eigenvectors[:, order]

    # Always compute the explained-variance ratio: it is part of the
    # return value regardless of how n_components was specified.
    explained_variance = eigenvalues / np.sum(eigenvalues)

    if isinstance(n_components, float):
        cumulative_variance = np.cumsum(explained_variance)
        n_components = int(np.argmax(cumulative_variance >= n_components)) + 1

    X_pca = X_centered @ eigenvectors[:, :n_components]
    return X_pca, eigenvalues[:n_components], explained_variance[:n_components]


# Keep the components explaining >= 95% of variance, then plot a scree chart.
X_pca, eigenvalues, explained_variance = custom_pca(X_normalized.values, n_components=0.95)

plt.figure(figsize=(8, 4))
plt.bar(range(1, len(explained_variance) + 1), explained_variance, alpha=0.6, label='Explained Variance')
# NOTE(review): this line marks the cumulative variance of the first 5
# retained components, not a fixed threshold — confirm that's intended.
plt.axhline(y=np.sum(explained_variance[:5]), color='r', linestyle='--')
plt.xlabel('主成分数量')
plt.ylabel('方差贡献率')
plt.title('PCA碎石图')
plt.legend()
plt.show()

# 3.5 Per-stock profile (radar chart)
# Placeholder fundamentals: values are randomly generated (unseeded, so
# they differ on every run) — replace with real fundamental data (e.g.
# pro.daily_basic) in production.
stock_profiles = pd.DataFrame({
    'ts_code': stocks,
    'market_cap': np.random.randint(100, 10000, size=len(stocks)),
    'pe': np.random.uniform(5, 50, size=len(stocks)),
    'pb': np.random.uniform(1, 10, size=len(stocks)),
    'volatility': np.random.uniform(0.1, 0.5, size=len(stocks)),
    'beta': np.random.uniform(0.5, 1.5, size=len(stocks))
}).set_index('ts_code')


# Radar chart comparing the profile features of several stocks.
def plot_radar_chart(data, title):
    """Draw one polar 'radar' trace per row of *data*, one spoke per column.

    NOTE(review): values are plotted raw; columns with large ranges (e.g.
    market_cap) visually dominate small ones (e.g. beta) — consider
    min-max scaling each column before plotting.
    """
    labels = data.columns
    stats = data.values

    # Evenly spaced spoke angles, one per feature.
    angles = np.linspace(0, 2 * np.pi, len(labels), endpoint=False).tolist()

    # Close each polygon by repeating the first point at the end.
    labels = labels.tolist() + [labels[0]]
    stats = np.hstack([stats, stats[:, [0]]])  # append first column to every row
    angles = angles + [angles[0]]

    # One shared polar subplot for all traces.
    fig, ax = plt.subplots(figsize=(8, 8), subplot_kw=dict(polar=True))

    # Outline plus translucent fill for each stock.
    for i in range(stats.shape[0]):
        ax.plot(angles, stats[i], 'o-', linewidth=2, label=data.index[i])
        ax.fill(angles, stats[i], alpha=0.25)

    # Spoke labels (drop the duplicated closing point) and radial limit.
    ax.set_thetagrids(np.degrees(angles[:-1]), labels[:-1])
    ax.set_ylim(0, np.max(stats) * 1.1)
    plt.title(title)
    plt.legend(loc='upper right')
    plt.show()


# Draw the comparison radar chart for all ten stocks.
plot_radar_chart(stock_profiles, "股票画像对比")

# 3.6 Class balancing via random undersampling
def undersample(X, y):
    """Randomly undersample every class in *y* down to the rarest class size.

    Returns (X_balanced, y_balanced). Sampling is without replacement via
    np.random (unseeded, so draws differ between runs).
    """
    smallest = y.value_counts().min()

    chosen = []
    for cls in y.unique():
        cls_index = y[y == cls].index
        picked = np.random.choice(cls_index, size=smallest, replace=False)
        chosen.extend(picked)

    return X.loc[chosen], y.loc[chosen]


# Balance the three classes and report the resulting label distribution.
# NOTE(review): X_resampled/y_resampled are never used downstream — the
# model in section 4 trains on the unbalanced X_normalized; confirm intent.
X_resampled, y_resampled = undersample(X_normalized, y)
print("均衡后标签分布：\n", y_resampled.value_counts())


# 4. Machine-learning modeling and evaluation (logistic regression)
class LogisticRegression:
    """Binary logistic regression trained by full-batch gradient descent.

    Interface-compatible with sklearn-style usage in this script:
    fit / predict / predict_proba, with learning_rate and num_iterations
    hyper-parameters.
    """

    def __init__(self, learning_rate=0.01, num_iterations=1000):
        self.learning_rate = learning_rate
        self.num_iterations = num_iterations
        self.weights = None  # (n_features,) vector, set by fit()
        self.bias = None     # scalar intercept, set by fit()

    def sigmoid(self, z):
        """Numerically stable logistic function.

        Fix: the original evaluated exp(-z) directly, which overflows and
        emits RuntimeWarnings for large |z|; clipping z to ±500 keeps
        exp() in range without changing results at float64 precision.
        """
        z = np.clip(z, -500, 500)
        return 1 / (1 + np.exp(-z))

    def fit(self, X, y):
        """Fit weights and bias by minimizing mean log-loss.

        X: (n_samples, n_features) array; y: (n_samples,) array of 0/1.
        """
        n_samples, n_features = X.shape
        self.weights = np.zeros(n_features)
        self.bias = 0.0

        for _ in range(self.num_iterations):
            y_pred = self.sigmoid(np.dot(X, self.weights) + self.bias)
            error = y_pred - y
            # Gradients of the mean log-loss w.r.t. weights and bias.
            dw = np.dot(X.T, error) / n_samples
            db = np.sum(error) / n_samples
            self.weights -= self.learning_rate * dw
            self.bias -= self.learning_rate * db

    def predict_proba(self, X):
        """Return P(y = 1 | x) for each row of X."""
        return self.sigmoid(np.dot(X, self.weights) + self.bias)

    def predict(self, X, threshold=0.5):
        """Return hard 0/1 labels by thresholding predict_proba."""
        return (self.predict_proba(X) >= threshold).astype(int)


# Binary target: 1 = 'up' day (label == 2), 0 = everything else.
y_binary = (y == 2).astype(int)

# Chronological train/test split at a fixed calendar date.
# NOTE(review): '2024-12-31' is hard-coded while the data window is the
# trailing 730 days from "today" — once the window no longer straddles that
# date, one side of the split becomes empty; confirm/parametrize.
mask = features.index.get_level_values('trade_date') < pd.Timestamp('2024-12-31')
X_train = X_normalized[mask].values
y_train = y_binary[mask].values
X_test = X_normalized[~mask].values
y_test = y_binary[~mask].values

# Train the from-scratch logistic regression on the earlier period.
model = LogisticRegression(learning_rate=0.01, num_iterations=1000)
model.fit(X_train, y_train)

# Hard labels and positive-class probabilities on the held-out period.
y_pred = model.predict(X_test)
y_probs = model.predict_proba(X_test)


# Hand-rolled evaluation metrics (no sklearn dependency)
def accuracy_score(y_true, y_pred):
    """Fraction of predictions equal to the ground truth."""
    correct = (y_true == y_pred)
    return np.mean(correct)


def precision_score(y_true, y_pred):
    """Precision for the positive class: TP / (TP + FP), 0 if none predicted."""
    true_pos = np.sum((y_true == 1) & (y_pred == 1))
    false_pos = np.sum((y_true == 0) & (y_pred == 1))
    denom = true_pos + false_pos
    return true_pos / denom if denom > 0 else 0


def recall_score(y_true, y_pred):
    """Recall for the positive class: TP / (TP + FN), 0 if no true positives."""
    true_pos = np.sum((y_true == 1) & (y_pred == 1))
    false_neg = np.sum((y_true == 1) & (y_pred == 0))
    denom = true_pos + false_neg
    return true_pos / denom if denom > 0 else 0


def f1_score(y_true, y_pred):
    """Harmonic mean of precision and recall (0 when both are 0)."""
    p = precision_score(y_true, y_pred)
    r = recall_score(y_true, y_pred)
    denom = p + r
    return 2 * p * r / denom if denom > 0 else 0


# Report held-out classification metrics.
print(f"准确率: {accuracy_score(y_test, y_pred):.4f}")
print(f"精确率: {precision_score(y_test, y_pred):.4f}")
print(f"召回率: {recall_score(y_test, y_pred):.4f}")
print(f"F1分数: {f1_score(y_test, y_pred):.4f}")


# Plot the 2x2 confusion matrix for the binary classifier.
def plot_confusion_matrix(y_true, y_pred):
    """Heatmap of the confusion matrix (rows = truth, columns = prediction).

    Assumes labels are exactly 0/1, since they index into the 2x2 matrix.
    """
    cm = np.zeros((2, 2), dtype=int)
    for true, pred in zip(y_true, y_pred):
        cm[true, pred] += 1

    plt.figure(figsize=(6, 4))
    sns.heatmap(cm, annot=True, fmt='d', cmap='Blues',
                xticklabels=['非上涨', '上涨'], yticklabels=['非上涨', '上涨'])
    plt.xlabel('预测标签')
    plt.ylabel('真实标签')
    plt.title('混淆矩阵')
    plt.show()


plot_confusion_matrix(y_test, y_pred)