#encoding=utf8
import numpy as np
import pickle
import os
import sys
from tqdm import tqdm
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import GridSearchCV

def load_dataset(file_name):
    '''
    Load a handwritten-digit dataset from a pickle file.

    The pickle holds a dict mapping class label -> list of 2-D image
    arrays.  Each image is flattened and its pixels scaled to [0, 1].

    Returns (dataset, labels):
        dataset: float ndarray, shape (n_images, n_pixels)
        labels:  float ndarray, shape (n_images, 1)
    or (None, None) when the file is missing or malformed.

    NOTE: called from multiple places -- do not delete this function or
    change its interface!  (原注释: 被多处调用，请勿删除或改动本函数！！！)
    '''
    try:
        with open(file_name, 'rb') as f:
            # NOTE(review): pickle.load can execute arbitrary code; only
            # ever load trusted dataset files here.
            raw_dataset = pickle.load(f)
    except FileNotFoundError:
        print(f"错误: 文件 {file_name} 未找到。请确保文件路径正确。")
        return None, None

    try:
        # Class 0 must exist; its first image fixes the feature length.
        example_image = raw_dataset[0][0]
    except KeyError:
        print("错误: 数据集格式不正确，无法找到类别0的数据。")
        return None, None
    except TypeError:
        print("错误: 数据集格式不正确，类别0的数据不是列表或数组。")
        return None, None

    # Collect rows in Python lists and stack ONCE at the end.  The old
    # np.vstack-per-image pattern re-copied the whole matrix on every
    # iteration, making loading quadratic in the number of images.
    feature_rows = []
    label_rows = []
    for i_class in raw_dataset.keys():
        images_list = raw_dataset.get(i_class, [])
        if not isinstance(images_list, list) or len(images_list) == 0:
            continue
        for image in images_list:
            feature_rows.append(image.flatten() / 255.0)  # normalize pixels to [0, 1]
            label_rows.append(i_class)

    total_images = len(feature_rows)
    if total_images == 0:
        # Preserve the original empty-result shapes and dtypes.
        dataset = np.empty((0, example_image.size))
        labels = np.empty((0, 1))
    else:
        dataset = np.vstack(feature_rows)
        # Original code vstack'ed labels onto a float array, so labels
        # came out as float -- keep that dtype for compatibility.
        labels = np.array(label_rows, dtype=float).reshape(-1, 1)

    print(f"成功加载 {total_images} 张手写数字图片。")
    return dataset, labels

class Classifier:
    """K-NN digit classifier tuned via grid search over k and distance metric.

    Loads the training set on construction, finds the best hyper-parameters
    in train(), then serves predictions through predict().  Training also
    writes a Markdown report and a CV-accuracy heatmap to
    ./step1/output/knn_grid_search/.
    """

    def __init__(self):
        # Best estimator found by train(); stays None until train() runs.
        self.model = None
        self.train_dataset, self.train_labels = load_dataset('./step1/input/training_dataset.pkl')
        if self.train_dataset is None:
            raise RuntimeError("训练数据集加载失败，无法继续。")

    def train(self):
        """Grid-search K-NN hyper-parameters, keep the best estimator,
        then generate the report and heatmap."""
        print("=== 正在使用 GridSearchCV 寻找最佳 K-NN 参数... ===")

        # Hyper-parameter grid to search over.
        param_grid = {
            'n_neighbors': [1, 3, 5, 7, 9],
            'metric': ['euclidean', 'manhattan', 'minkowski']
        }

        knn_model = KNeighborsClassifier(n_jobs=-1)

        grid_search = GridSearchCV(
            knn_model,
            param_grid,
            cv=5,
            scoring='accuracy',
            n_jobs=-1,
            verbose=2
        )

        # sklearn expects 1-D labels; ours are stored as (n, 1).
        train_labels = self.train_labels.ravel()
        grid_search.fit(self.train_dataset, train_labels)

        self.model = grid_search.best_estimator_

        print("=== 模型训练完成 ===")
        print(f"最佳参数: {grid_search.best_params_}")
        print(f"最佳交叉验证准确率: {grid_search.best_score_:.4f}")

        # Generate and save report + heatmap.
        self.generate_results(grid_search)

    def generate_results(self, grid_search):
        """Write the Markdown report and the heatmap for *grid_search*."""
        # Make sure the output directory exists.
        output_dir = "./step1/output/knn_grid_search"
        os.makedirs(output_dir, exist_ok=True)

        # Markdown report.
        self.save_markdown_report(grid_search, output_dir)

        # Hyper-parameter heatmap.
        self.generate_heatmap(grid_search, output_dir)

    def save_markdown_report(self, grid_search, output_dir):
        """Write best parameters and best CV accuracy to report.md."""
        report_path = os.path.join(output_dir, "report.md")
        best_params = grid_search.best_params_
        best_score = grid_search.best_score_

        with open(report_path, "w", encoding="utf-8") as f:
            f.write("# K-NN 模型超参数调优报告\n\n")
            f.write("## 最佳参数\n\n")
            f.write(f"在网格搜索中，找到的最佳参数组合为：\n\n")
            f.write(f"- 邻居数 (`n_neighbors`): **{best_params['n_neighbors']}**\n")
            f.write(f"- 距离度量 (`metric`): **{best_params['metric']}**\n\n")
            f.write(f"该参数组合在5折交叉验证中的最佳准确率为： **{best_score:.4f}**。\n")
            f.write("\n---\n\n")

        print(f"K-NN 模型调优报告已保存至 {report_path}")

    def generate_heatmap(self, grid_search, output_dir):
        """Plot mean CV accuracy as an (n_neighbors x metric) heatmap."""
        print("\n=== 正在生成超参数准确率热力图... ===")

        results = grid_search.cv_results_
        scores = results['mean_test_score']

        # Grid values as defined in train().
        n_neighbors_values = grid_search.param_grid['n_neighbors']
        metric_values = grid_search.param_grid['metric']

        # BUG FIX: GridSearchCV enumerates the grid with parameter names
        # sorted alphabetically ('metric' < 'n_neighbors') and the LAST
        # name varying fastest, so mean_test_score is metric-major.  The
        # previous reshape(len(n_neighbors), len(metric)) scrambled the
        # cells against their tick labels.  Reshape metric-major, then
        # transpose so rows correspond to n_neighbors.
        scores_matrix = scores.reshape(len(metric_values), len(n_neighbors_values)).T

        plt.rcParams['font.sans-serif'] = ['SimHei']  # CJK-capable font for axis text
        plt.rcParams['axes.unicode_minus'] = False
        plt.figure(figsize=(10, 8))

        ax = sns.heatmap(
            scores_matrix,
            annot=True,
            fmt=".4f",
            cmap="RdYlGn",
            xticklabels=metric_values,
            yticklabels=n_neighbors_values,
            vmin=scores.min(),
            vmax=scores.max()
        )

        ax.set_title('K-NN 超参数准确率热力图', fontsize=16)
        ax.set_xlabel('距离度量 (metric)', fontsize=12)
        ax.set_ylabel('邻居数 (n_neighbors)', fontsize=12)
        plt.tight_layout()

        figure_path = os.path.join(output_dir, 'knn_heatmap.png')
        plt.savefig(figure_path)
        plt.close()
        print(f"超参数热力图已保存为 '{figure_path}'")

    def predict(self, test_dataset):
        '''
        Predict labels for a batch of samples.

        Input:  test_dataset -- ndarray of shape (500, 784)
        Output: predicted_labels -- ndarray of shape (500,)

        Requires train() to have been called (self.model set).
        '''
        predicted_labels = self.model.predict(test_dataset)
        return predicted_labels


def calculate_accuracy(file_name, classifier):
    """Shuffle the test set stored at *file_name*, run *classifier* on it,
    and return the fraction of correctly predicted labels (0 on any error)."""
    features, truth = load_dataset(file_name)
    if features is None:
        return 0

    # Shuffle samples and labels with one shared permutation so pairs stay aligned.
    order = np.random.permutation(features.shape[0])
    features = features[order, :]
    truth = truth[order, :]

    predictions = classifier.predict(features)

    # Guard clauses: reject malformed classifier output.
    if not isinstance(predictions, np.ndarray):
        print('错误：输出格式有误，必须为ndarray格式')
        return 0
    if predictions.size != truth.size:
        print('错误：输出的标签数量与测试集大小不一致')
        return 0

    return np.mean(predictions.flatten() == truth.flatten())


if __name__ == '__main__':
    # Train once (with hyper-parameter search), then evaluate on every test set.
    classifier = Classifier()
    classifier.train()

    test_dir = './step1/input'
    # One clean set plus every noise-type / noise-level combination.
    test_files = ['test_dataset_clean.pkl']
    for noise in range(1, 7):
        for level in range(1, 4):
            test_files.append(f'test_dataset_noise_type{noise}_level{level}.pkl')

    accuracies = []
    print("\n=== 正在对所有测试集进行评估... ===")
    with tqdm(total=len(test_files), desc="正在测试", file=sys.stdout) as pbar:
        for file_name in test_files:
            file_path = os.path.join(test_dir, file_name)
            pbar.set_description(f"正在测试: {file_name}")
            accuracy = calculate_accuracy(file_path, classifier)
            pbar.set_postfix({'正确率': f'{accuracy:.4f}'})
            pbar.update(1)
            accuracies.append(accuracy)

    num_test_datasets = len(accuracies)
    mean_accuracies = sum(accuracies) / num_test_datasets
    print(f'\n你在总共{num_test_datasets}个测试集上的平均正确率为：{mean_accuracies:.4f}')

    # Append the overall average accuracy to the tuning report, if it exists.
    report_path = "./step1/output/knn_grid_search/report.md"
    if os.path.exists(report_path):
        with open(report_path, "a", encoding="utf-8") as f:
            f.write(f"## 模型在所有测试集上的表现\n\n")
            f.write(f"使用找到的最佳参数，模型在 **{num_test_datasets}** 个测试集上的平均准确率为： **{mean_accuracies:.4f}**。")