#encoding=utf8
import numpy as np
import pickle
import os
import sys
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torchvision import transforms

# ===== 数据加载 =====
def load_dataset(file_name):
    """Load a pickled dataset of handwritten digit images.

    The pickle file maps each class label to a list of 2-D image arrays.
    Every image is flattened and rescaled from [0, 255] to [0, 1].

    Args:
        file_name: Path to the pickle file.

    Returns:
        (dataset, labels) where dataset has shape (n_images, n_pixels) and
        labels has shape (n_images, 1), both float arrays; (None, None)
        if the file is missing or has an unexpected structure.
    """
    try:
        with open(file_name, 'rb') as f:
            raw_dataset = pickle.load(f)
    except FileNotFoundError:
        print(f"错误: 文件 {file_name} 未找到。")
        return None, None

    try:
        example_image = list(raw_dataset.values())[0][0]
    except Exception as e:
        print(f"错误: 数据集格式不正确 ({e})")
        return None, None

    # Accumulate rows in Python lists and stack once at the end: the old
    # per-image np.vstack copied the whole array every iteration (O(n^2)).
    feature_rows = []
    label_rows = []
    for i_class, images_list in raw_dataset.items():
        if not images_list:
            continue
        for image in images_list:
            feature_rows.append(image.flatten() / 255.0)
            label_rows.append(i_class)

    total_images = len(feature_rows)
    if total_images:
        dataset = np.vstack(feature_rows)
        labels = np.asarray(label_rows, dtype=float).reshape(-1, 1)
    else:
        # Preserve the original empty shapes when every class list is empty.
        dataset = np.empty((0, example_image.size))
        labels = np.empty((0, 1))
    print(f"成功加载 {total_images} 张手写数字图片。")
    return dataset, labels

# ===== 改进 CNN =====
class ImprovedCNN(nn.Module):
    """Three-stage conv net (Conv-BN-ReLU-Pool-Dropout2d) for 28x28 grayscale digits."""

    def __init__(self, num_classes=10):
        """
        Args:
            num_classes: Number of output classes (default 10 digits).
        """
        super().__init__()
        self.relu = nn.ReLU()
        self.pool = nn.MaxPool2d(2, 2)
        self.dropout = nn.Dropout(0.5)
        self.conv_dropout = nn.Dropout2d(0.2)

        self.conv1 = nn.Conv2d(1, 64, 3, padding=1)
        self.bn1 = nn.BatchNorm2d(64)
        self.conv2 = nn.Conv2d(64, 128, 3, padding=1)
        self.bn2 = nn.BatchNorm2d(128)
        self.conv3 = nn.Conv2d(128, 256, 3, padding=1)
        self.bn3 = nn.BatchNorm2d(256)

        # Probe the conv stack once to determine the flattened feature size.
        # Run the probe in eval mode: a train-mode forward pass would fold the
        # all-zero dummy batch into the BatchNorm running statistics before any
        # real training happens.
        self.eval()
        with torch.no_grad():
            self.flatten_size = self._features(torch.zeros(1, 1, 28, 28)).numel()
        self.train()

        self.fc1 = nn.Linear(self.flatten_size, 256)
        self.fc2 = nn.Linear(256, num_classes)

    def _features(self, x):
        """Conv feature extractor shared by the shape probe and forward()."""
        for conv, bn in ((self.conv1, self.bn1),
                         (self.conv2, self.bn2),
                         (self.conv3, self.bn3)):
            x = self.pool(self.relu(bn(conv(x))))
            x = self.conv_dropout(x)
        return x

    def forward(self, x):
        """Map a (N, 1, 28, 28) float batch to (N, num_classes) logits."""
        x = self._features(x)
        x = x.view(x.size(0), -1)
        x = self.relu(self.fc1(x))
        x = self.dropout(x)
        return self.fc2(x)

# ===== 分类器封装 =====
class Classifier:
    """Wraps ImprovedCNN with data loading, augmentation, training and prediction."""

    def __init__(self, device='cuda', dataset_path='./step1/input/training_dataset.pkl'):
        """
        Args:
            device: Preferred device; falls back to CPU when CUDA is unavailable.
            dataset_path: Pickle file with the training data. Defaults to the
                previously hard-coded location, so existing callers are unaffected.

        Raises:
            RuntimeError: If the training dataset cannot be loaded.
        """
        self.device = torch.device(device if torch.cuda.is_available() else 'cpu')
        self.model = ImprovedCNN().to(self.device)
        self.train_dataset, self.train_labels = load_dataset(dataset_path)
        if self.train_dataset is None:
            raise RuntimeError("训练数据集加载失败。")

        # Per-sample data augmentation pipeline.
        self.transform = transforms.Compose([
            transforms.ToPILImage(),
            transforms.RandomRotation(15),
            transforms.RandomAffine(0, translate=(0.2, 0.2)),
            transforms.ColorJitter(brightness=0.2, contrast=0.2),
            transforms.ToTensor()
        ])

    def train(self, epochs=50, batch_size=64, lr=0.001):
        """Train the CNN with augmentation, Adam (weight decay) and cosine LR annealing."""
        print("=== 正在训练 CNN 模型... ===")
        X = torch.tensor(self.train_dataset, dtype=torch.float32).reshape(-1, 1, 28, 28)
        y = torch.tensor(self.train_labels.ravel(), dtype=torch.long)

        class AugDataset(torch.utils.data.Dataset):
            """Adds light Gaussian noise, then applies the torchvision transform."""
            def __init__(self, X, y, transform):
                self.X = X
                self.y = y
                self.transform = transform
            def __len__(self):
                return len(self.X)
            def __getitem__(self, idx):
                img = self.X[idx].squeeze(0).numpy()
                # Light noise for robustness against the noisy test sets.
                img = img + np.random.normal(0, 0.05, img.shape)
                # np.random.normal promotes the array to float64, which
                # ToPILImage rejects -- cast back to float32 before transforming.
                img = np.clip(img, 0, 1).astype(np.float32)
                img = self.transform(img)
                return img, self.y[idx]

        loader = DataLoader(AugDataset(X, y, self.transform), batch_size=batch_size, shuffle=True)
        criterion = nn.CrossEntropyLoss()
        optimizer = optim.Adam(self.model.parameters(), lr=lr, weight_decay=1e-3)
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=epochs)

        self.model.train()
        for epoch in range(epochs):
            total_loss = 0
            for inputs, labels in loader:
                inputs, labels = inputs.to(self.device), labels.to(self.device)
                optimizer.zero_grad()
                loss = criterion(self.model(inputs), labels)
                loss.backward()
                optimizer.step()
                total_loss += loss.item()
            scheduler.step()
            print(f"Epoch [{epoch+1}/{epochs}] Loss: {total_loss/len(loader):.4f}")
        print("=== 模型训练完成 ===")

    def predict(self, test_dataset, batch_size=256):
        """Predict class indices for a (N, 784) array of flattened images.

        Evaluates in mini-batches instead of moving the whole test set to the
        device at once, which could exhaust GPU memory on large sets.

        Returns:
            A numpy int array of shape (N,) with the predicted class per row.
        """
        X_test = torch.tensor(test_dataset, dtype=torch.float32).reshape(-1, 1, 28, 28)
        self.model.eval()
        batch_preds = []
        with torch.no_grad():
            for start in range(0, X_test.size(0), batch_size):
                batch = X_test[start:start + batch_size].to(self.device)
                batch_preds.append(self.model(batch).argmax(dim=1).cpu())
        if not batch_preds:
            return np.empty(0, dtype=np.int64)
        return torch.cat(batch_preds).numpy()

# ===== 准确率计算 =====
def calculate_accuracy(file_name, classifier):
    """Evaluate `classifier` on the test set stored in `file_name`.

    Returns the fraction of correctly predicted labels, or 0 when the
    dataset cannot be loaded or the prediction count does not match.
    """
    test_dataset, test_labels = load_dataset(file_name)
    if test_dataset is None:
        return 0
    # Shuffle samples and labels with one shared permutation so pairs stay aligned.
    order = np.random.permutation(test_dataset.shape[0])
    shuffled_data = test_dataset[order, :]
    shuffled_labels = test_labels[order, :]
    predicted_labels = classifier.predict(shuffled_data)
    if predicted_labels.size != shuffled_labels.size:
        print('错误：输出的标签数量与测试集大小不一致')
        return 0
    return np.mean(predicted_labels.flatten() == shuffled_labels.flatten())

# ===== 主程序 =====
if __name__ == '__main__':
    classifier = Classifier()
    classifier.train(epochs=50, batch_size=64, lr=0.001)

    test_dir = './step1/input'
    # One clean set plus 6 noise types x 3 levels = 19 test files in total.
    test_files = ['test_dataset_clean.pkl']
    for noise in range(1, 7):
        for level in range(1, 4):
            test_files.append(f'test_dataset_noise_type{noise}_level{level}.pkl')

    print("\n=== 正在对所有测试集进行评估... ===")
    accuracies = []
    with tqdm(total=len(test_files), desc="正在测试", file=sys.stdout) as pbar:
        for file_name in test_files:
            file_path = os.path.join(test_dir, file_name)
            pbar.set_description(f"正在测试: {file_name}")
            acc = calculate_accuracy(file_path, classifier)
            pbar.set_postfix({'正确率': f'{acc:.4f}'})
            pbar.update(1)
            accuracies.append(acc)

    mean_acc = sum(accuracies) / len(accuracies)
    print(f'\n你在总共{len(accuracies)}个测试集上的平均正确率为：{mean_acc:.4f}')
