#encoding=utf8
import numpy as np
import pickle
import os
import sys
from tqdm import tqdm

# 导入PyTorch库
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset, Dataset
import torchvision.transforms as transforms
from PIL import Image

# cuDNN 加速
torch.backends.cudnn.benchmark = True
# Raise Python's recursion limit so unpickling deeply nested objects does not hit a RecursionError
sys.setrecursionlimit(2000)

def load_dataset(file_name):
    """Load a pickled dataset of per-class image lists.

    The pickle is expected to hold a dict mapping class label -> list of
    images (NumPy arrays); class 0 must exist and be non-empty, since its
    first image defines the flattened feature width.

    NOTE: called from multiple places — do not delete or change this
    function's interface!

    Args:
        file_name: path to the pickle file.

    Returns:
        (dataset, labels): dataset is (n_samples, n_features) float64 with
        pixel values scaled to [0, 1]; labels is (n_samples, 1) float64.
        Returns (None, None) when the file is missing or malformed.
    """
    try:
        with open(file_name, 'rb') as f:
            raw_dataset = pickle.load(f)
    except FileNotFoundError:
        print(f"错误: 文件 {file_name} 未找到。请确保文件路径正确。")
        return None, None

    try:
        example_image = raw_dataset[0][0]
    except (KeyError, IndexError):
        # IndexError added: an empty list for class 0 used to crash here.
        print("错误: 数据集格式不正确，无法找到类别0的数据。")
        return None, None
    except TypeError:
        print("错误: 数据集格式不正确，类别0的数据不是列表或数组。")
        return None, None

    # Accumulate rows in Python lists and stack once at the end: the old
    # np.vstack inside the loop copied the whole array per image (O(n^2)).
    feature_rows = []
    label_rows = []
    for i_class in raw_dataset.keys():
        images_list = raw_dataset.get(i_class, [])
        if not isinstance(images_list, list) or len(images_list) == 0:
            continue
        for image in images_list:
            # Flatten each image and scale pixels from [0, 255] to [0, 1].
            feature_rows.append(image.flatten() / 255.0)
            label_rows.append(i_class)

    if not feature_rows:
        # Preserve the original empty-result shapes and (float64) dtypes.
        return np.empty((0, example_image.size)), np.empty((0, 1))

    dataset = np.asarray(feature_rows, dtype=np.float64)
    labels = np.asarray(label_rows, dtype=np.float64).reshape(-1, 1)
    return dataset, labels

# --- 改进版CNN模型 ---
class CNN(nn.Module):
    """Small convolutional classifier for square, single-channel images.

    The input is a flattened image of ``input_size`` pixels; it is reshaped
    to (N, 1, side, side) in ``forward``, so ``input_size`` must be a
    perfect square.
    """

    def __init__(self, input_size, num_classes):
        """
        Args:
            input_size: pixels per flattened image; must be a perfect square.
            num_classes: number of output classes.

        Raises:
            ValueError: if ``input_size`` is not a perfect square.
        """
        super(CNN, self).__init__()
        # round() guards against float error in sqrt for large perfect squares.
        self.image_size = int(round(float(np.sqrt(input_size))))
        if self.image_size * self.image_size != input_size:
            # Previously a non-square input silently truncated the side length
            # and failed later with a confusing view() error in forward().
            raise ValueError(
                f"input_size={input_size} is not a perfect square; "
                "images must be square."
            )

        # Convolutional feature extractor.
        self.conv_layers = nn.Sequential(
            # Conv block 1: 1 -> 32 channels, 3x3 kernel, padding keeps size.
            nn.Conv2d(1, 32, kernel_size=3, padding=1),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            # Conv block 2: 32 -> 64 channels.
            nn.Conv2d(32, 64, kernel_size=3, padding=1),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2),  # halves spatial size

            # Conv block 3: 64 -> 128 channels.
            nn.Conv2d(64, 128, kernel_size=3, padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2),  # halves spatial size
        )

        # Two 2x poolings: output side is floor(floor(s/2)/2) == s // 4.
        conv_output_size = self.image_size // 4
        # Flattened feature count entering the fully connected head.
        fc_input_size = 128 * conv_output_size * conv_output_size

        # Classification head.
        self.fc_layers = nn.Sequential(
            nn.Linear(fc_input_size, 512),
            nn.ReLU(),
            nn.Dropout(0.5),
            nn.Linear(512, num_classes)
        )

        # He (Kaiming) initialization for conv/linear weights, zero biases.
        for m in self.modules():
            if isinstance(m, (nn.Conv2d, nn.Linear)):
                nn.init.kaiming_normal_(m.weight, mode='fan_in', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)

    def forward(self, x):
        """Classify a batch of flattened images.

        Args:
            x: tensor reshapeable to (N, 1, image_size, image_size).

        Returns:
            Logits of shape (N, num_classes).
        """
        # Reshape flat input to image layout (batch, 1, H, W).
        x = x.view(-1, 1, self.image_size, self.image_size)
        x = self.conv_layers(x)
        x = x.view(x.size(0), -1)  # flatten conv features per sample
        x = self.fc_layers(x)
        return x

# 自定义数据集类
class CustomDataset(Dataset):
    """Dataset over flattened image features with integer class labels.

    Optionally reshapes each sample to (1, side, side) and runs it through
    ``transform`` (for on-the-fly augmentation); otherwise yields the flat
    feature vector unchanged.
    """

    def __init__(self, data, labels, transform=None):
        # Features as float32, labels flattened to a 1-D long tensor.
        self.data = torch.tensor(data, dtype=torch.float32)
        self.labels = torch.tensor(labels.ravel(), dtype=torch.long)
        self.transform = transform

    def __len__(self):
        return self.data.shape[0]

    def __getitem__(self, index):
        features = self.data[index]
        target = self.labels[index]
        if self.transform is not None:
            # Image transforms expect (C, H, W); assume a square image.
            side = int(np.sqrt(features.size(0)))
            features = self.transform(features.view(1, side, side))
        return features, target

class Classifier:
    """Loads the training set, trains the CNN, and predicts test labels."""

    def __init__(self):
        """Load the training data and pick the compute device.

        Raises:
            RuntimeError: if the training dataset cannot be loaded.
        """
        self.model = None
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        print(f"正在使用设备: {self.device}")
        self.train_dataset, self.train_labels = load_dataset('./step1/input/training_dataset.pkl')
        if self.train_dataset is None:
            raise RuntimeError("训练数据集加载失败，无法继续。")

    def train(self):
        """Train the CNN with on-the-fly augmentation; stores it in self.model."""
        print("\n=== 正在配置数据增强... ===")
        # On-the-fly data augmentation.
        # NOTE: assumes images are square.
        image_size = int(np.sqrt(self.train_dataset.shape[1]))

        # Augmentation pipeline applied per sample by CustomDataset.
        # NOTE(review): ToPILImage on a float tensor yields a mode-'F' image;
        # confirm ColorJitter supports that mode in the installed torchvision.
        data_transforms = transforms.Compose([
            transforms.ToPILImage(),  # tensor -> PIL image for the transforms
            transforms.RandomRotation(15),  # random rotation
            transforms.RandomAffine(degrees=0, translate=(0.1, 0.1)),  # random shift
            transforms.ColorJitter(brightness=0.2, contrast=0.2),  # brightness/contrast jitter
            transforms.ToTensor(),  # back to a tensor
            transforms.Normalize(mean=[0.5], std=[0.5]),  # must match predict()
        ])

        print(f"训练集大小: {self.train_dataset.shape[0]}")

        # Custom dataset applies the augmentation per sample.
        train_dataset_custom = CustomDataset(self.train_dataset, self.train_labels, transform=data_transforms)

        input_size = self.train_dataset.shape[1]
        num_classes = 10
        self.model = CNN(input_size, num_classes).to(self.device)
        criterion = nn.CrossEntropyLoss()
        optimizer = optim.Adam(self.model.parameters(), lr=0.001, weight_decay=1e-5)
        # Halve the learning rate when the training loss plateaus.
        scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.5, patience=5)

        epochs = 50
        batch_size = 256

        train_loader = DataLoader(
            dataset=train_dataset_custom,
            batch_size=batch_size,
            shuffle=True,
            num_workers=4,  # parallel workers speed up augmented loading
            pin_memory=True
        )

        print(f"\n=== 正在训练模型（{epochs}个周期）... ===")
        self.model.train()
        for epoch in range(epochs):
            running_loss = 0.0
            for images, labels in train_loader:
                images = images.to(self.device, non_blocking=True)
                labels = labels.to(self.device, non_blocking=True)

                outputs = self.model(images)
                loss = criterion(outputs, labels)
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                running_loss += loss.item()

            avg_loss = running_loss / len(train_loader)
            scheduler.step(avg_loss)  # plateau scheduler needs the metric
            tqdm.write(f'周期 [{epoch+1}/{epochs}], 损失: {avg_loss:.4f}, 当前学习率: {optimizer.param_groups[0]["lr"]}')

        print("\n=== 最终模型训练完成 ===")

    def predict(self, test_dataset):
        """Predict class labels for flattened test images.

        Args:
            test_dataset: array of shape (n_samples, n_features); features
                are expected in [0, 1] as produced by load_dataset.

        Returns:
            1-D numpy array of predicted class indices.

        Raises:
            RuntimeError: if called before train().
        """
        if self.model is None:
            # Previously this surfaced as an opaque AttributeError.
            raise RuntimeError("模型尚未训练，无法进行预测。")
        self.model.eval()
        with torch.no_grad():
            test_tensor = torch.tensor(test_dataset, dtype=torch.float32).to(self.device)
            # Reshape to CNN input layout; assumes square images.
            image_size = int(np.sqrt(test_tensor.size(1)))
            test_tensor = test_tensor.view(-1, 1, image_size, image_size)

            # Equivalent to transforms.Normalize(mean=[0.5], std=[0.5]) but
            # applied to the whole batch at once instead of the original
            # per-image Compose + torch.stack loop.
            test_tensor = (test_tensor - 0.5) / 0.5

            outputs = self.model(test_tensor)
            _, predicted_labels_tensor = torch.max(outputs, 1)
            predicted_labels = predicted_labels_tensor.cpu().numpy()
        return predicted_labels

def calculate_accuracy(file_name, classifier):
    """Evaluate ``classifier`` on the test set stored in ``file_name``.

    Shuffles the test samples, runs prediction, and returns the fraction of
    correct labels; returns 0 when the file cannot be loaded or the
    classifier's output is malformed.
    """
    test_dataset, test_labels = load_dataset(file_name)
    if test_dataset is None:
        return 0

    # Shuffle samples and labels with the same permutation.
    shuffle = np.random.permutation(test_dataset.shape[0])
    test_dataset = test_dataset[shuffle, :]
    test_labels = test_labels[shuffle, :]

    predicted_labels = classifier.predict(test_dataset)

    # Guard clauses replace the original nested if/else.
    if not isinstance(predicted_labels, np.ndarray):
        print('错误：输出格式有误，必须为ndarray格式')
        return 0
    if predicted_labels.size != test_labels.size:
        print('错误：输出的标签数量与测试集大小不一致')
        return 0
    return np.mean(predicted_labels.flatten() == test_labels.flatten())

if __name__ == '__main__':
    classifier = Classifier()
    classifier.train()

    # The clean test set plus every noise-type/level combination.
    test_dir = './step1/input'
    test_files = ['test_dataset_clean.pkl']
    for noise in range(1, 7):
        for level in range(1, 4):
            test_files.append(f'test_dataset_noise_type{noise}_level{level}.pkl')

    total_accuracy = 0
    evaluated = 0

    print("\n=== 正在对所有测试集进行评估... ===")
    with tqdm(total=len(test_files), desc="正在测试", file=sys.stdout) as pbar:
        for file_name in test_files:
            file_path = os.path.join(test_dir, file_name)
            pbar.set_description(f"正在测试: {file_name}")
            accuracy = calculate_accuracy(file_path, classifier)
            pbar.set_postfix({'正确率': f'{accuracy:.4f}'})
            pbar.update(1)
            total_accuracy += accuracy
            evaluated += 1

    mean_accuracy = total_accuracy / evaluated
    print(f'\n你在总共{evaluated}个测试集上的平均正确率为：{mean_accuracy:.4f}')