import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.datasets import load_wine
import warnings
from factor_analyzer.factor_analyzer import calculate_bartlett_sphericity, calculate_kmo
from sklearn.preprocessing import scale
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, accuracy_score, classification_report

# Silence library warnings so console output stays readable.
warnings.filterwarnings('ignore')

# Font configuration: fall back through common sans-serif fonts, and make
# sure the minus sign renders correctly on axes.
# NOTE(review): none of these fonts cover CJK glyphs, yet some plot titles
# below are Chinese — they may render as boxes; confirm on target machine.
plt.rcParams.update({
    'font.sans-serif': ['Arial', 'DejaVu Sans', 'sans-serif'],
    'axes.unicode_minus': False,
})

# Load the sklearn wine dataset and wrap the feature matrix in a
# labelled DataFrame. `wine` and `data` are reused further down.
wine = load_wine()
data = pd.DataFrame(data=wine['data'], columns=wine['feature_names'])

# Quick sanity checks: first rows, dimensions, dtype/null summary.
print(data.head())
print(data.shape)
print(data.info())  # .info() prints its report directly and returns None

# Bartlett's test of sphericity: checks whether the correlation matrix
# differs from the identity (a prerequisite for PCA / factor analysis).
bartlett_chi2, bartlett_p = calculate_bartlett_sphericity(data)
print("Bartlett's球状检验结果：")
print(bartlett_chi2, bartlett_p)

# KMO measure of sampling adequacy (values closer to 1 are better).
# calculate_kmo returns (per-variable KMO, overall KMO); only the
# per-variable values are printed here, matching the original output.
kmo_per_variable, kmo_overall = calculate_kmo(data)
print("KMO检验结果：")
print(kmo_per_variable)

# Standardize features (zero mean, unit variance); used later for the
# projection onto the principal components.
data_scale = scale(data)

# Correlation matrix of the features (np.corrcoef standardizes
# internally, so the raw data can be used directly), rounded to 3 dp.
covX = np.around(np.corrcoef(data.T), decimals=3)

# Eigendecomposition. The correlation matrix is symmetric, so use eigh:
# it guarantees real eigenvalues/eigenvectors and is numerically stable.
# (np.linalg.eig on a symmetric matrix can return spurious complex
# parts, which the previous code had to strip with np.real.)
featValue, featVec = np.linalg.eigh(covX)

# eigh returns eigenvalues in ascending order; sort descending and
# reorder the eigenvector columns to match.
idx = np.argsort(featValue)[::-1]
featValue = featValue[idx]
featVec = featVec[:, idx]

# Scree plot: eigenvalue of each component, with the Kaiser criterion
# reference line at eigenvalue = 1.
n_features = data.shape[1]
component_index = range(1, n_features + 1)

plt.figure(figsize=(10, 6))
plt.scatter(component_index, featValue)
plt.plot(component_index, featValue)
plt.title("Scree Plot")
plt.xlabel("Factors")
plt.ylabel("Eigenvalue")
# Span the reference line over all components; derived from the data
# instead of the previously hard-coded 13, so the plot adapts if the
# feature count changes.
plt.hlines(y=1, xmin=0, xmax=n_features)
plt.grid()
plt.savefig('scree_plot.png')  # save instead of show (headless-friendly)
plt.close()

# Variance contribution of each component and its running total.
# Renamed from `gx`/`lg`: the old `lg` was later clobbered by the
# LogisticRegression model of the same name, a confusing shadowing.
contrib = featValue / np.sum(featValue)
cum_contrib = np.cumsum(contrib)

print("各主成分贡献率:", contrib)
print("累计贡献率:", cum_contrib)

# Choose the smallest k whose cumulative contribution reaches 80%.
# cum_contrib ends at 1.0, so argmax is guaranteed to find a hit.
k = int(np.argmax(cum_contrib >= 0.80)) + 1
print(f"选择前{k}个主成分，累计贡献率为{cum_contrib[k-1]:.4f}")

# Project the standardized data onto the first k eigenvectors.
selectVec = featVec[:, :k]                 # loadings, shape (n_features, k)
finalData = np.dot(data_scale, selectVec)  # scores, shape (n_samples, k)

# Heatmap of the selected eigenvectors (component loadings per variable).
# NOTE(review): title/label are Chinese but the configured fonts may lack
# CJK glyphs — confirm they render on the target machine.
plt.figure(figsize=(14, 14))
heat_ax = sns.heatmap(selectVec, annot=True, cmap="BuPu")
heat_ax.yaxis.set_tick_params(labelsize=15)
plt.title("主成分分析")
plt.ylabel("变量")
plt.savefig('heatmap.png')  # save to file instead of interactive display
plt.close()

# Logistic regression on the PCA-reduced features.
random_state = 42  # shared seed so both experiments use the same split
X = finalData
y = wine['target']
x_train, x_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=random_state
)

# Model renamed from `lg` to `lg_pca`: the old name clobbered the
# cumulative-contribution array computed earlier in the script, and
# this also mirrors the `lg_original` baseline model below.
lg_pca = LogisticRegression(max_iter=1000)
lg_pca.fit(x_train, y_train)
y_pred = lg_pca.predict(x_test)

print('PCA降维后模型准确率', accuracy_score(y_test, y_pred))
print('PCA降维后混淆矩阵')
print(confusion_matrix(y_test, y_pred))
print('PCA降维后分类报告')
print(classification_report(y_test, y_pred))

# Baseline: the same classifier on all 13 original features, with an
# identical split (same random_state) so the accuracies are comparable.
X_original = data
y_original = wine['target']
original_split = train_test_split(
    X_original, y_original, test_size=0.2, random_state=random_state
)
x_train_original, x_test_original, y_train_original, y_test_original = original_split

lg_original = LogisticRegression(max_iter=1000)
lg_original.fit(x_train_original, y_train_original)
y_pred_original = lg_original.predict(x_test_original)

print('原始数据模型准确率', accuracy_score(y_test_original, y_pred_original))
print('原始数据混淆矩阵')
print(confusion_matrix(y_test_original, y_pred_original))
print('原始数据分类报告')
print(classification_report(y_test_original, y_pred_original))