#encoding=utf8
import numpy as np
import pickle
import os
import sys
from tqdm import tqdm

from sklearn.ensemble import HistGradientBoostingClassifier
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import RobustScaler
from sklearn.model_selection import train_test_split

def load_dataset(file_name):
    """Load a handwritten-digit dataset from a pickle file.

    Called from multiple places; do not delete this function or change its
    interface!!!

    The pickle is expected to hold a dict mapping class label -> list of
    2-D image arrays (uint8 pixel values 0-255).

    Returns:
        (dataset, labels): dataset is an (N, n_pixels) float array with
        pixel values scaled to [0, 1]; labels is an (N, 1) float array.
        Returns (None, None) if the file is missing or malformed.
    """
    try:
        with open(file_name, 'rb') as f:
            raw_dataset = pickle.load(f)
    except FileNotFoundError:
        print(f"错误: 文件 {file_name} 未找到。请确保文件路径正确。")
        return None, None

    # Probe class 0 to learn the per-image feature size (used for the
    # empty-dataset fallback shape below).
    try:
        example_image = raw_dataset[0][0]
    except KeyError:
        print("错误: 数据集格式不正确，无法找到类别0的数据。")
        return None, None
    except TypeError:
        print("错误: 数据集格式不正确，类别0的数据不是列表或数组。")
        return None, None

    # Accumulate rows in Python lists and stack ONCE at the end.  The
    # previous implementation called np.vstack inside the loop, which
    # reallocates and copies the whole array on every image (quadratic
    # in the number of images).
    feature_rows = []
    label_rows = []
    for i_class in raw_dataset.keys():
        images_list = raw_dataset.get(i_class, [])
        if not isinstance(images_list, list) or len(images_list) == 0:
            continue
        for image in images_list:
            feature_rows.append(image.flatten() / 255.0)
            label_rows.append(i_class)

    total_images = len(feature_rows)
    if total_images:
        dataset = np.vstack(feature_rows)
        # float dtype matches the original behavior of stacking onto a
        # float64 np.empty((0, 1)) array.
        labels = np.asarray(label_rows, dtype=float).reshape(-1, 1)
    else:
        dataset = np.empty((0, example_image.size))
        labels = np.empty((0, 1))

    print(f"成功加载 {total_images} 张手写数字图片。")

    return dataset, labels

class Classifier:
    """Handwritten-digit classifier.

    Wraps a RobustScaler + HistGradientBoostingClassifier pipeline that is
    grown one tree at a time (warm start) so validation accuracy can be
    monitored live, with simple patience-based early stopping.
    """

    def __init__(self):
        # Trained pipeline; populated by train().  `model` is kept as an
        # alias of `pipeline` for backward compatibility.
        self.model = None
        self.pipeline = None
        self.train_dataset, self.train_labels = load_dataset('./step1/input/training_dataset.pkl')
        if self.train_dataset is None:
            raise RuntimeError("训练数据集加载失败，无法继续。")

    def train(self):
        """Fit the pipeline on the loaded training data.

        Grows the ensemble one tree per loop iteration and evaluates on a
        held-out validation split; stops early when the validation score
        has not improved for `patience` consecutive trees.
        """
        print("=== 正在配置鲁棒提升树模型并进行训练... ===")
        print("训练过程可能需要几分钟，将实时输出进度。")

        # Hold out 20% of the training data to monitor performance live.
        X_train, X_val, y_train, y_val = train_test_split(
            self.train_dataset,
            self.train_labels.ravel(),
            test_size=0.2,
            random_state=44
        )

        # Pipeline: RobustScaler + HistGBDT.
        # BUG FIX: the original re-fit the model from scratch every loop
        # iteration with a fixed max_iter=1, so each "iteration" produced
        # the identical single-tree model, the validation score never
        # changed, and early stopping fired after `patience` rounds.
        # warm_start=True + an incrementing max_iter makes each fit() call
        # add exactly one new tree to the existing ensemble.
        pipeline = Pipeline([
            ('scaler', RobustScaler()),
            ('gbdt', HistGradientBoostingClassifier(
                max_iter=1,              # grown incrementally below
                learning_rate=0.02,
                max_depth=5,
                min_samples_leaf=10,
                l2_regularization=1.0,
                max_bins=255,
                random_state=44,
                warm_start=True
            ))
        ])
        gbdt = pipeline.named_steps['gbdt']

        best_score = 0
        patience = 15
        no_improvement_count = 0
        total_iters = 1500

        with tqdm(total=total_iters, desc="训练进度", file=sys.stdout) as pbar:
            for i in range(total_iters):
                # Add one tree (warm start continues from the previous fit).
                gbdt.set_params(max_iter=i + 1)
                pipeline.fit(X_train, y_train)

                # Evaluate on the held-out validation split.
                val_score = pipeline.score(X_val, y_val)

                # Track the best score seen so far for early stopping.
                if val_score > best_score:
                    best_score = val_score
                    no_improvement_count = 0
                else:
                    no_improvement_count += 1

                pbar.set_postfix({
                    '验证集准确率': f'{val_score:.4f}',
                    '最佳准确率': f'{best_score:.4f}',
                    '未提升轮次': no_improvement_count
                })
                pbar.update(1)

                # Patience-based early stopping.
                if no_improvement_count >= patience:
                    print("\n=== 早停触发！模型性能已连续多轮无明显提升 ===")
                    break

        self.model = pipeline
        self.pipeline = self.model

        print("\n=== 模型训练完成 ===")
        print(f"最终在验证集上的最佳准确率为: {best_score:.4f}")
        print("采用了鲁棒参数配置（抗噪声优化版）")

    def predict(self, test_dataset):
        """Return predicted labels for `test_dataset` (rows = samples)."""
        predicted_labels = self.pipeline.predict(test_dataset)
        return predicted_labels

def calculate_accuracy(file_name, classifier):
    """Load the test set at `file_name`, shuffle it, and score `classifier`.

    Returns the fraction of correctly predicted labels, or 0 if the file
    cannot be loaded or the classifier's output is malformed.
    """
    test_dataset, test_labels = load_dataset(file_name)
    if test_dataset is None:
        return 0

    # Shuffle samples and labels with the same permutation.
    order = np.random.permutation(test_dataset.shape[0])
    shuffled_data = test_dataset[order, :]
    shuffled_labels = test_labels[order, :]

    predicted_labels = classifier.predict(shuffled_data)

    # Guard clauses: reject non-ndarray output, then size mismatches.
    if not isinstance(predicted_labels, np.ndarray):
        print('错误：输出格式有误，必须为ndarray格式')
        return 0
    if predicted_labels.size != shuffled_labels.size:
        print('错误：输出的标签数量与测试集大小不一致')
        return 0

    return np.mean(predicted_labels.flatten() == shuffled_labels.flatten())

if __name__ == '__main__':
    classifier = Classifier()
    classifier.train()

    # One clean test set plus 6 noise types x 3 noise levels.
    test_dir = './step1/input'
    test_files = ['test_dataset_clean.pkl']
    for noise in range(1, 7):
        for level in range(1, 4):
            test_files.append(f'test_dataset_noise_type{noise}_level{level}.pkl')

    print("\n=== 正在对所有测试集进行评估... ===")
    accuracies = []
    with tqdm(total=len(test_files), desc="正在测试", file=sys.stdout) as pbar:
        for file_name in test_files:
            pbar.set_description(f"正在测试: {file_name}")
            accuracy = calculate_accuracy(os.path.join(test_dir, file_name), classifier)
            pbar.set_postfix({'正确率': f'{accuracy:.4f}'})
            pbar.update(1)
            accuracies.append(accuracy)

    num_test_datasets = len(accuracies)
    mean_accuracies = sum(accuracies) / num_test_datasets
    print(f'\n你在总共{num_test_datasets}个测试集上的平均正确率为：{mean_accuracies:.4f}')