import numpy as np
import pandas as pd
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, MinMaxScaler, OneHotEncoder
from sklearn.impute import SimpleImputer
from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, classification_report

# Build a demo dataset: 5 numeric features, injected missing values,
# and one categorical feature.
X, y = make_classification(n_samples=100, n_features=5, n_informative=3, n_redundant=1, random_state=42)

# Wrap the numeric features in a DataFrame with readable column names.
df = pd.DataFrame(X, columns=['feature_1', 'feature_2', 'feature_3', 'feature_4', 'feature_5'])

# Inject NaNs into feature_1 (rows 10-14) so the imputation step has work to do.
df.iloc[10:15, 0] = np.nan

# Add a categorical feature. Use a seeded Generator so the categories are
# reproducible, consistent with the random_state=42 pinned everywhere else
# in this script (the original unseeded np.random.choice changed every run).
rng = np.random.default_rng(42)
df['category'] = rng.choice(['A', 'B'], size=100)

# Inspect the raw data, including the NaN rows.
print("原始数据：")
print(df.head(15))

# Report how many missing values each column contains.
print("缺失值统计：")
print(df.isnull().sum())

# Fill the gaps in place: column mean for the numeric feature,
# most frequent value (mode) for the categorical one.
for cols, fill_strategy in ((['feature_1'], 'mean'), (['category'], 'most_frequent')):
    df[cols] = SimpleImputer(strategy=fill_strategy).fit_transform(df[cols])

# Show the data after imputation (the former NaN rows are now filled).
print("\n填充后的数据：")
print(df.head())

# The five numeric columns rescaled in place below.
numeric_cols = ['feature_1', 'feature_2', 'feature_3', 'feature_4', 'feature_5']

# Z-score standardization: zero mean, unit variance per column.
df[numeric_cols] = StandardScaler().fit_transform(df[numeric_cols])
print("\n标准化后的数据：")
print(df.head())

# Min-max normalization to [0, 1], applied on top of the standardized values.
# NOTE(review): min-max scaling is invariant to a positive affine transform,
# so the result equals min-max scaling the raw columns directly — the two
# steps are kept here purely to demonstrate both scalers.
df[numeric_cols] = MinMaxScaler().fit_transform(df[numeric_cols])
print("\n归一化后的数据：")
print(df.head())

# One-hot encode the categorical column into dense indicator columns.
ohe = OneHotEncoder(sparse_output=False)
dummy_matrix = ohe.fit_transform(df[['category']])

# Wrap the indicators in a DataFrame named after each category value.
dummy_frame = pd.DataFrame(dummy_matrix, columns=ohe.get_feature_names_out(['category']))

# Swap the raw 'category' column for its indicator columns.
df = df.drop('category', axis=1)
df = pd.concat([df, dummy_frame], axis=1)

# Inspect the encoded result.
print("\nOne-Hot编码后的数据：")
print(df.head())

# Hold out 20% of the rows for testing; fixed seed keeps the split stable.
X_train, X_test, y_train, y_test = train_test_split(
    df, y, test_size=0.2, random_state=42
)

# Report how many samples landed in each split (via the label vectors).
print("\n训练集和测试集大小：")
print("训练集大小：", y_train.shape)
print("测试集大小：", y_test.shape)

# Project the features onto the top 5 principal components.
decomposer = PCA(n_components=5)

# Fit on the training split only, then apply the same projection to the test
# split — avoids leaking test-set statistics into the transform.
train_components = decomposer.fit_transform(X_train)
test_components = decomposer.transform(X_test)

# Name the components PC1..PC5 and wrap both splits in DataFrames.
component_names = [f'PC{idx + 1}' for idx in range(decomposer.n_components_)]
X_train_pca_df = pd.DataFrame(train_components, columns=component_names)
X_test_pca_df = pd.DataFrame(test_components, columns=component_names)

# Variance captured per component, then the running (cumulative) total.
print("\n解释方差比例:")
print(decomposer.explained_variance_ratio_)
print("\n总解释方差:")
print(np.cumsum(decomposer.explained_variance_ratio_))

# Fit a logistic-regression classifier on the PCA-reduced training data.
classifier = LogisticRegression(random_state=42)
classifier.fit(X_train_pca_df, y_train)

# Predict the held-out labels.
predictions = classifier.predict(X_test_pca_df)

# Summarize performance: overall accuracy plus per-class precision/recall/F1.
print("\n模型准确率：", accuracy_score(y_test, predictions))
print("\n分类报告：")
print(classification_report(y_test, predictions))
