import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.datasets import load_iris
from sklearn.preprocessing import StandardScaler
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.model_selection import train_test_split

# --- 1. Data loading and filtering ---
print("--- 1. 数据加载与筛选 ---")
iris = load_iris()
df = pd.DataFrame(iris.data, columns=iris.feature_names)
df['target'] = iris.target
# Attach human-readable species names by indexing target_names with the codes.
df['species'] = iris.target_names[iris.target]

# Keep only Setosa (0) and Versicolor (1); .copy() so the assignments below
# don't trigger SettingWithCopyWarning on a view.
df_binary = df[df['target'] < 2].copy()

# QBoost-style labels: remap {0, 1} -> {-1, +1} arithmetically.
df_binary['target_qboost'] = df_binary['target'] * 2 - 1

# Feature matrix and the two label encodings.
X = df_binary[iris.feature_names]
y = df_binary['target_qboost']
y_original = df_binary['target']  # original 0/1 labels, kept for LDA and plots

N_samples, N_features = X.shape
print(f"筛选后样本数量: {N_samples}")
print(f"特征数量: {N_features}\n")

# --- 2. Statistical feature analysis ---
print("--- 2. 统计学特征分析 ---")
# Overall descriptive statistics across both classes.
overall_stats = X.describe()
print("整体描述性统计:\n", overall_stats)
# Per-class descriptive statistics (Setosa vs. Versicolor).
print("\n按类别分组描述性统计 (Setosa vs. Versicolor):")
per_class_stats = df_binary.groupby('species')[iris.feature_names].describe()
print(per_class_stats)

# --- 3. Visualization ---
print("\n--- 3. 数据可视化 ---")

# Box plots: one subplot per feature, classes side by side.
plt.figure(figsize=(15, 6))
for idx, feat in enumerate(iris.feature_names, start=1):
    plt.subplot(1, N_features, idx)
    sns.boxplot(x='species', y=feat, data=df_binary)
    plt.title(f'Box Plot of {feat} by Species')
plt.tight_layout()
plt.savefig('boxplot_features.png')
plt.show()

# Histograms with KDE overlays, per feature and per class.
plt.figure(figsize=(15, 6))
for idx, feat in enumerate(iris.feature_names, start=1):
    plt.subplot(1, N_features, idx)
    sns.histplot(data=df_binary, x=feat, hue='species', kde=True, palette='viridis')
    plt.title(f'Distribution of {feat} by Species')
plt.tight_layout()
plt.savefig('kde_hist_features.png')
plt.show()

# Pairwise scatter-plot matrix over all features.
sns.pairplot(df_binary, hue='species', vars=iris.feature_names, palette='viridis')
plt.suptitle('Pair Plot of Iris Features by Species', y=1.02)
plt.savefig('pairplot_features.png')
plt.show()

# --- 4. Standardization ---
print("\n--- 4. 数据标准化 ---")
# Zero-mean / unit-variance scaling, fitted on the full binary subset.
scaler = StandardScaler().fit(X)
X_scaled = scaler.transform(X)
X_scaled_df = pd.DataFrame(X_scaled, columns=iris.feature_names)
print("标准化后数据 (前5行):\n", X_scaled_df.head())

# --- 5. Dimensionality reduction (LDA) ---
print("\n--- 5. 数据降维 (LDA) ---")
# LDA is supervised and expects integer class labels, so use the original
# 0/1 targets (y_original) rather than the -1/+1 QBoost encoding.
# With two classes LDA can project to at most n_classes - 1 = 1 dimension.
lda = LinearDiscriminantAnalysis(n_components=1)
X_lda = lda.fit_transform(X_scaled, y_original)

print(f"降维后数据维度: {X_lda.shape}")
print("降维后数据 (前5行):\n", X_lda[:5])

# Visualize the 1-D LDA projection (histogram/KDE suits 1-D data best).
plt.figure(figsize=(8, 6))
# Consistency fix: derive species names from iris.target_names (as done when
# building df['species'] above) instead of hard-coding {0: 'setosa', ...},
# so the labels cannot drift from the dataset metadata.
df_lda = pd.DataFrame({
    'LDA Component 1': X_lda[:, 0],
    'species': iris.target_names[y_original.to_numpy()],
})
sns.histplot(data=df_lda, x='LDA Component 1', hue='species', kde=True,
             palette='viridis', stat='density', common_norm=False)
plt.title('1D LDA Projection of Iris Dataset (Setosa vs. Versicolor)')
plt.xlabel('LDA Component 1')
plt.ylabel('Density')
plt.grid(True)
plt.savefig('lda_1d_projection.png')
plt.show()

# --- 6. Train/test split ---
print("\n--- 6. 数据集划分 ---")
# Split the standardized features; stratify on y so both sets keep the
# original class balance.
split_options = dict(test_size=0.3, random_state=42, stratify=y)
X_train_scaled, X_test_scaled, y_train, y_test = train_test_split(
    X_scaled, y, **split_options
)

print(f"训练集特征形状: {X_train_scaled.shape}, 训练集标签形状: {y_train.shape}")
print(f"测试集特征形状: {X_test_scaled.shape}, 测试集标签形状: {y_test.shape}")
print(f"训练集标签分布:\n{y_train.value_counts()}")
print(f"测试集标签分布:\n{y_test.value_counts()}")