"""
Demo1: 鸢尾花分类 - 机器学习入门经典案例
===========================================

这个demo将教你：
1. 如何加载和探索数据
2. 如何可视化数据
3. 如何训练多个机器学习模型
4. 如何评估和对比模型性能

鸢尾花数据集包含150个样本，3个类别（Setosa、Versicolor、Virginica）
每个样本有4个特征：花萼长度、花萼宽度、花瓣长度、花瓣宽度
"""

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
import warnings
warnings.filterwarnings('ignore')

# Configure matplotlib so Chinese text renders correctly
plt.rcParams.update({
    'font.sans-serif': ['SimHei'],   # font able to display CJK labels
    'axes.unicode_minus': False,     # keep minus signs readable with CJK fonts
})

banner = "=" * 60
print(banner)
print("欢迎来到机器学习入门 - 鸢尾花分类")
print(banner)

# ============================================
# Step 1: Load the data
# ============================================
print("\n【步骤1】加载鸢尾花数据集...")
iris = load_iris()
X, y = iris.data, iris.target  # feature matrix and integer class labels

# Assemble a DataFrame for convenient inspection; map the integer labels
# back to their species names as a categorical column.
df = pd.DataFrame(X, columns=iris.feature_names)
df['species'] = pd.Categorical.from_codes(y, iris.target_names)

summary_lines = [
    f"✓ 数据加载成功！",
    f"  - 样本数量: {len(df)}",
    f"  - 特征数量: {X.shape[1]}",
    f"  - 类别数量: {len(iris.target_names)}",
    f"  - 类别名称: {', '.join(iris.target_names)}",
]
print("\n".join(summary_lines))

print("\n前5行数据预览:")
print(df.head())

print("\n数据统计信息:")
print(df.describe())

# ============================================
# Step 2: Visualize the data
# ============================================
print("\n【步骤2】数据可视化...")

# One 2x2 figure holding all four exploratory plots
fig, axes = plt.subplots(2, 2, figsize=(12, 10))
fig.suptitle('鸢尾花数据集可视化分析', fontsize=16, fontweight='bold')
(ax_box, ax_scatter), (ax_heat, ax_pie) = axes

# 2.1 Box plot: per-feature value distribution, split by species
long_form = df.melt(id_vars='species', var_name='feature', value_name='value')
sns.boxplot(data=long_form, x='feature', y='value', hue='species', ax=ax_box)
ax_box.set_title('各特征的分布情况')
ax_box.set_xlabel('特征')
ax_box.set_ylabel('数值')
ax_box.tick_params(axis='x', rotation=15)

# 2.2 Scatter plot: petal length vs petal width, one colour per species
for species in iris.target_names:
    rows = df[df['species'] == species]
    ax_scatter.scatter(rows['petal length (cm)'], rows['petal width (cm)'],
                       label=species, alpha=0.7, s=50)
ax_scatter.set_xlabel('花瓣长度 (cm)')
ax_scatter.set_ylabel('花瓣宽度 (cm)')
ax_scatter.set_title('花瓣长度 vs 花瓣宽度')
ax_scatter.legend()
ax_scatter.grid(True, alpha=0.3)

# 2.3 Heat map of pairwise feature correlations (species column excluded)
corr = df.iloc[:, :-1].corr()
sns.heatmap(corr, annot=True, fmt='.2f', cmap='coolwarm', ax=ax_heat)
ax_heat.set_title('特征相关性热力图')

# 2.4 Pie chart showing the class balance
counts = df['species'].value_counts()
ax_pie.pie(counts, labels=counts.index, autopct='%1.1f%%',
           startangle=90, colors=['#ff9999', '#66b3ff', '#99ff99'])
ax_pie.set_title('各类别样本数量分布')

plt.tight_layout()
plt.savefig('iris_visualization.png', dpi=300, bbox_inches='tight')
print("✓ 可视化图表已保存: iris_visualization.png")

# ============================================
# Step 3: Preprocess the data
# ============================================
print("\n【步骤3】数据预处理...")

# Hold out 20% of samples as a test set. stratify=y keeps the class
# proportions identical across both splits; random_state makes it repeatable.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42, stratify=y
)

split_report = [
    f"✓ 数据集划分完成",
    f"  - 训练集样本数: {len(X_train)}",
    f"  - 测试集样本数: {len(X_test)}",
]
print("\n".join(split_report))

# Standardise features: fit the statistics on the training split only,
# then reuse that same transform on the test split (avoids data leakage).
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)

print(f"✓ 特征标准化完成")

# ============================================
# Step 4: Train several models
# ============================================
print("\n【步骤4】训练多个机器学习模型...")

# Candidate classifiers, keyed by their display name
models = {
    '决策树': DecisionTreeClassifier(random_state=42),
    'K近邻(KNN)': KNeighborsClassifier(n_neighbors=5),
    '支持向量机(SVM)': SVC(kernel='rbf', random_state=42),
    '随机森林': RandomForestClassifier(n_estimators=100, random_state=42),
}

results = {}  # display name -> test-set accuracy

for name, clf in models.items():
    print(f"\n训练 {name} 模型...")
    # Fit on the scaled training split, then score on the held-out test split
    clf.fit(X_train_scaled, y_train)
    accuracy = accuracy_score(y_test, clf.predict(X_test_scaled))
    results[name] = accuracy
    print(f"✓ {name} 训练完成，准确率: {accuracy:.4f} ({accuracy*100:.2f}%)")

# ============================================
# Step 5: Evaluate and compare the models
# ============================================
print("\n【步骤5】模型评估和对比...")

# Pick the model with the highest test-set accuracy
best_model_name = max(results, key=results.get)
best_accuracy = results[best_model_name]

print(f"\n{'='*60}")
print(f"🏆 最佳模型: {best_model_name}")
print(f"🎯 准确率: {best_accuracy:.4f} ({best_accuracy*100:.2f}%)")
print(f"{'='*60}")

# Detailed per-class report for the winning model
print(f"\n{best_model_name} 的详细评估报告:")
best_model = models[best_model_name]
y_pred_best = best_model.predict(X_test_scaled)

print("\n分类报告:")
print(classification_report(y_test, y_pred_best, target_names=iris.target_names))

# Confusion matrix: rows = true class, columns = predicted class
cm = confusion_matrix(y_test, y_pred_best)
print("\n混淆矩阵:")
print(cm)

# Visual comparison of the models
fig, axes = plt.subplots(1, 2, figsize=(14, 5))

# Bar chart of per-model accuracy
ax1 = axes[0]
model_names = list(results.keys())
accuracies = list(results.values())
# BUGFIX: highlight the chosen model by name instead of comparing accuracy
# values — float equality would paint every model tied with the best green.
colors = ['#66ff66' if name == best_model_name else '#ff9999'
          for name in model_names]

bars = ax1.bar(model_names, accuracies, color=colors, alpha=0.7, edgecolor='black')
ax1.set_ylabel('准确率')
ax1.set_title('各模型准确率对比')
# BUGFIX: derive the lower y-limit from the data — the previous fixed 0.9
# floor clipped (hid) any model scoring below 90% accuracy.
ax1.set_ylim([max(0.0, min(accuracies) - 0.05), 1.0])
ax1.grid(axis='y', alpha=0.3)

# Annotate each bar with its exact accuracy value
for bar, acc in zip(bars, accuracies):
    height = bar.get_height()
    ax1.text(bar.get_x() + bar.get_width()/2., height,
             f'{acc:.4f}', ha='center', va='bottom', fontweight='bold')

# Confusion-matrix heat map for the best model
ax2 = axes[1]
sns.heatmap(cm, annot=True, fmt='d', cmap='Blues',
            xticklabels=iris.target_names,
            yticklabels=iris.target_names, ax=ax2)
ax2.set_xlabel('预测类别')
ax2.set_ylabel('真实类别')
ax2.set_title(f'{best_model_name} 混淆矩阵')

plt.tight_layout()
plt.savefig('model_comparison.png', dpi=300, bbox_inches='tight')
print("\n✓ 模型对比图表已保存: model_comparison.png")

# ============================================
# Step 6: Predict on brand-new samples
# ============================================
print("\n【步骤6】使用最佳模型进行预测...")

# Hand-picked measurements — one typical flower per species
test_samples = np.array([
    [5.1, 3.5, 1.4, 0.2],  # expected: Setosa
    [6.2, 2.9, 4.3, 1.3],  # expected: Versicolor
    [7.3, 2.9, 6.3, 1.8],  # expected: Virginica
])

# Apply the SAME scaler fitted on the training data before predicting
predictions = best_model.predict(scaler.transform(test_samples))

print("\n预测示例:")
for i, (sample, pred) in enumerate(zip(test_samples, predictions), 1):
    print(f"  样本{i}: {sample} -> 预测类别: {iris.target_names[pred]}")

# ============================================
# Summary
# ============================================
closing_lines = [
    "\n" + "=" * 60,
    "🎉 恭喜！你已经完成了第一个机器学习项目！",
    "=" * 60,
    "\n你学到了什么：",
    "  ✓ 如何加载和探索数据",
    "  ✓ 如何进行数据可视化",
    "  ✓ 如何划分训练集和测试集",
    "  ✓ 如何训练多个机器学习模型",
    "  ✓ 如何评估和对比模型性能",
    "  ✓ 如何使用模型进行预测",
    "\n💡 扩展练习建议：",
    "  1. 尝试调整模型参数，看看能否提高准确率",
    "  2. 尝试只使用2个特征进行分类，观察效果变化",
    "  3. 尝试使用其他机器学习算法（如逻辑回归、朴素贝叶斯）",
    "  4. 尝试使用交叉验证来评估模型",
    "\n📚 下一步：运行 demo2_mnist/mnist_recognition.py 学习深度学习！",
    "=" * 60,
]
for line in closing_lines:
    print(line)

# Display every figure created above (blocks until the windows are closed)
plt.show()
