
import os
import pandas as pd
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import Adam
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import itertools
import numpy as np

# --- Configuration ---

# Directory that receives all generated CSVs and plots.
# exist_ok=True avoids the check-then-create race of os.path.exists + makedirs.
os.makedirs('output', exist_ok=True)

# Paths to the training and test data files (expected in the working directory).
TRAIN_FILE = 'iris_training.csv'
TEST_FILE = 'iris_test.csv'

# Human-readable names for the three Iris classes (integer labels 0, 1, 2).
CLASS_NAMES = ['setosa', 'versicolor', 'virginica']

# --- 1. Data loading and preprocessing ---
def load_data(filepath, feature_cols):
    """Load an Iris CSV file and split it into features and labels.

    Args:
        filepath: path to the CSV file. The first row is assumed to be a
            non-data header line and is skipped.
        feature_cols: list of feature column names to keep.

    Returns:
        (X, y): a feature DataFrame restricted to ``feature_cols`` and an
        int64 label Series (non-numeric labels are coerced to 0).
    """
    # Standard Iris layout: four measurements followed by the class label.
    columns = ['sepal_length', 'sepal_width',
               'petal_length', 'petal_width', 'species']
    frame = pd.read_csv(filepath, header=None, names=columns, skiprows=1)

    # Coerce the label column to numeric; anything unparseable becomes 0.
    labels = pd.to_numeric(frame['species'], errors='coerce')
    labels = labels.fillna(0).astype(np.int64)
    return frame[feature_cols], labels

# --- 2. Model construction ---
def build_model(input_shape, num_classes):
    """Build and return an uncompiled Keras classifier.

    Args:
        input_shape: number of input features (an int, not a tuple).
        num_classes: number of output classes (softmax width).

    Returns:
        A Sequential model: two 10-unit ReLU layers + softmax output.
    """
    model = Sequential()
    model.add(Dense(10, activation='relu', input_shape=(input_shape,)))
    model.add(Dense(10, activation='relu'))
    model.add(Dense(num_classes, activation='softmax'))
    return model

# --- 3. Experiment execution ---
def run_experiment(hyperparams):
    """Run one full train/evaluate cycle for a single hyper-parameter setting.

    Args:
        hyperparams: dict with keys 'feature_combination' (list of feature
            column names), 'learning_rate' (float) and 'epochs' (int).

    Returns:
        dict summarising the run: feature combination, hyper-parameters,
        train/test loss and accuracy, plus the Keras training history
        (kept for later plotting).
    """
    features = hyperparams['feature_combination']
    lr = hyperparams['learning_rate']
    n_epochs = hyperparams['epochs']

    print(f"--- 开始实验: 特征={features}, LR={lr}, Epochs={n_epochs} ---")

    # Load train/test data restricted to the chosen feature subset.
    train_X, train_y = load_data(TRAIN_FILE, features)
    test_X, test_y = load_data(TEST_FILE, features)

    # Standardise features: fit on the training set only, apply to both.
    scaler = StandardScaler()
    train_X_std = scaler.fit_transform(train_X)
    test_X_std = scaler.transform(test_X)

    # Build and compile the classifier.
    model = build_model(input_shape=len(features), num_classes=len(CLASS_NAMES))
    model.compile(optimizer=Adam(learning_rate=lr),
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])

    # Train silently (verbose=0) to keep the sweep's log output small.
    history = model.fit(train_X_std, train_y,
                        epochs=n_epochs,
                        validation_data=(test_X_std, test_y),
                        verbose=0)

    # Final evaluation on both splits.
    train_loss, train_acc = model.evaluate(train_X_std, train_y, verbose=0)
    test_loss, test_acc = model.evaluate(test_X_std, test_y, verbose=0)

    print(f"--- 实验结束: 训练准确率={train_acc:.4f}, 测试准确率={test_acc:.4f} ---")

    # Package everything the caller needs for ranking and plotting.
    return {
        'features': '+'.join(features),
        'num_features': len(features),
        'learning_rate': lr,
        'epochs': n_epochs,
        'train_loss': train_loss,
        'train_accuracy': train_acc,
        'test_loss': test_loss,
        'test_accuracy': test_acc,
        'history': history.history
    }

# --- 4. Main program ---
if __name__ == '__main__':
    # Hyper-parameter grid: every feature subset of size 2, 3 and 4.
    feature_names = ['sepal_length', 'sepal_width', 'petal_length', 'petal_width']
    feature_combinations = []
    for i in [2, 3, 4]:
        feature_combinations.extend(itertools.combinations(feature_names, i))

    learning_rates = [0.01, 0.005, 0.001]
    epochs_options = [50, 100, 150]

    # Build the experiment list: full cross-product of
    # feature combination x learning rate x epoch count.
    experiments = []
    for features in feature_combinations:
        for lr in learning_rates:
            for epochs in epochs_options:
                experiments.append({
                    'feature_combination': list(features),
                    'learning_rate': lr,
                    'epochs': epochs
                })

    # Run all experiments sequentially.
    all_results = [run_experiment(params) for params in experiments]

    # --- 5. Result processing and visualisation ---
    results_df = pd.DataFrame(all_results)

    # Best run, ranked by test-set accuracy.
    best_result_row = results_df.loc[results_df['test_accuracy'].idxmax()]

    # Save all results to CSV; the 'history' column holds dicts and is
    # dropped because it does not serialise meaningfully to CSV.
    results_df.drop('history', axis=1).to_csv('output/all_experiment_results.csv', index=False)

    print("\n========================= 实验完成 =========================")
    print("所有实验结果已保存到 output/all_experiment_results.csv")
    
    print("\n--- 最佳实验结果 ---")
    print(best_result_row[['features', 'learning_rate', 'epochs', 'test_accuracy']])

    # --- Plot the best run's training history ---
    # NOTE(review): axis labels/titles are Chinese strings; correct rendering
    # assumes a CJK-capable matplotlib font is configured — confirm.
    best_history = best_result_row['history']
    plt.figure(figsize=(12, 5))

    # Accuracy subplot (left): train vs. validation (test) accuracy per epoch.
    plt.subplot(1, 2, 1)
    plt.plot(best_history['accuracy'], label='训练准确率')
    plt.plot(best_history['val_accuracy'], label='测试准确率')
    plt.title('最佳模型 - 准确率')
    plt.xlabel('迭代次数')
    plt.ylabel('准确率')
    plt.legend()

    # Loss subplot (right): train vs. validation cross-entropy per epoch.
    plt.subplot(1, 2, 2)
    plt.plot(best_history['loss'], label='训练损失')
    plt.plot(best_history['val_loss'], label='测试损失')
    plt.title('最佳模型 - 损失 (交叉熵)')
    plt.xlabel('迭代次数')
    plt.ylabel('损失')
    plt.legend()

    plt.suptitle(f"最佳模型性能: {best_result_row['features']} | LR={best_result_row['learning_rate']} | Epochs={best_result_row['epochs']}")
    plt.tight_layout(rect=[0, 0.03, 1, 0.95])
    plt.savefig('output/best_model_training_history.png')
    print("\n最佳模型的训练历史图已保存到 output/best_model_training_history.png")

    # --- Compare the average performance of each feature combination ---
    avg_performance = results_df.groupby('features')['test_accuracy'].mean().sort_values(ascending=False)

    plt.figure(figsize=(12, 7))
    avg_performance.plot(kind='bar', color=plt.cm.viridis(np.linspace(0, 1, len(avg_performance))))
    plt.title('不同特征组合的平均测试准确率')
    plt.xlabel('特征组合')
    plt.ylabel('平均测试准确率')
    plt.xticks(rotation=45, ha='right')
    plt.tight_layout()
    plt.savefig('output/feature_combination_performance.png')
    print("不同特征组合的性能对比图已保存到 output/feature_combination_performance.png")

    # --- Print a Markdown-formatted summary table ---
    # For brevity, show only the best run per feature combination.
    # NOTE(review): DataFrame.to_markdown requires the optional
    # 'tabulate' package to be installed — confirm it is available.
    best_per_feature = results_df.loc[results_df.groupby('features')['test_accuracy'].idxmax()]
    best_per_feature = best_per_feature.sort_values('test_accuracy', ascending=False)

    markdown_table = best_per_feature[[ 
        'features', 'learning_rate', 'epochs', 'train_loss', 
        'train_accuracy', 'test_loss', 'test_accuracy'
    ]].to_markdown(index=False, floatfmt=".4f")

    print("\n--- 各特征组合下的最佳结果总结 (Markdown格式) ---")
    print(markdown_table)