import numpy as np
from scipy.stats import pearsonr
from sklearn.decomposition import PCA
from imblearn.over_sampling import SMOTE
import seaborn as sns
import matplotlib.pyplot as plt
from Index.dataset import DataSet
import pandas as pd
from DataControl.getDataFromTushare.tudata import pullDataFromTushare
# Sample data for the Pearson-correlation demo.
x = [1, 2, 3, 7, 5]
y = [2, 3, 4, 5, 6]

# pearsonr returns the correlation coefficient and the two-sided p-value.
result = pearsonr(x, y)
corr, p_value = result

print(f"Pearson correlation coefficient: {corr}")
print(f"P-value: {p_value}")

# Sample dataset: 3 samples x 3 features.
X = np.array([[12, 21, 3], [44, 5, -6], [7, 78, 9]])

# Build a PCA model with the target number of components.
# NOTE: n_components equals the input feature count here, so this is a
# change of basis rather than an actual dimensionality reduction.
pca = PCA(n_components=3)

# Fit PCA on X and project X onto the principal components.
X_pca = pca.fit_transform(X)

# Inspect the transformed dataset and the explained variance.
print("原始数据集 X 的形状：", X.shape)
print("降维后的数据集 X_pca 的形状：", X_pca.shape)
print("降维后的数据集 X_pca：")
print(X_pca)
ratios = pca.explained_variance_ratio_
print("每个主成分的解释方差比例：", ratios)
print("累计解释方差比例：", np.sum(ratios))

# Assume X_train and Y_train are our training-set features and labels.
# In this example we use some made-up data for demonstration:
# 24 samples with 2 float features each, and binary labels (10 zeros,
# 14 ones) — an imbalanced set used by the SMOTE demo below.
X_train = np.array([[0.11622591, -0.0317206],
                     [0.77481731, 0.60935141],
                     [1.25192108, -0.22367336],
                     [0.53366841, -0.30312976],
                     [1.52091956, -0.49283504],
                     [-0.28162401, -2.10400981],
                     [0.83680821, 1.72827342],
                     [0.3084254, 0.33299982],
                     [0.70472253, -0.73309052],
                     [0.28893132, -0.38761769],
                     [1.15514042, 0.0129463],
                     [0.88407872, 0.35454207],
                     [1.31301027, -0.92648734],
                     [-1.11515198, -0.93689695],
                     [-0.18410027, -0.45194484],
                     [0.9281014, 0.53085498],
                     [-0.14374509, 0.27370049],
                     [-0.41635887, -0.38299653],
                     [0.08711622, 0.93259929],
                     [1.70580611, -0.11219234],
                     [0.47436887, -0.2645749],
                     [1.07844562, -0.19435291],
                     [1.44228238, -1.31256615],
                     [1.25636713, -1.04463226]])
Y_train = np.array([0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0,
                     1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0])

# Oversample the minority class with SMOTE to balance the label counts.
X_smote, Y_smote = SMOTE().fit_resample(X_train, Y_train)

# Show the resampled features and labels.
print("过采样后的特征集：")
print(X_smote)
print("过采样后的标签集：")
print(Y_smote)

# Tally how many samples each class has after resampling.
labels, tallies = np.unique(Y_smote, return_counts=True)
print("各类别样本数量：")
for label, count in zip(labels, tallies):
    print(f"类别 {label}: {count}")

# Load the sample dataset for stock 000001.SZ and drop the non-numeric /
# non-feature columns before computing correlations.
dataset = DataSet('000001.SZ').data
dataset = dataset.drop(columns = ['trade_date', 'Fmark'])
print(dataset.columns)

# Pairwise (Pearson) correlation matrix of the remaining columns.
datasetCorr = dataset.corr()

# Plot the correlation matrix as a heatmap.
# plt.rcParams['font.family'] = ['SimHei']  # uncomment for CJK axis labels
plt.rcParams['axes.unicode_minus'] = False  # render minus signs correctly
plt.figure(figsize = (30, 15))
# fmt must be a format spec like ".2f" ("float" is invalid and would raise
# if annot were ever turned on).
sns.heatmap(datasetCorr,annot = False,
            fmt = ".2f",
            linewidths = 0.5,
            cmap = "RdYlBu")
plt.tick_params(labelsize = 20)
plt.show()