import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
import numpy as np
import struct
import os
import gzip
from sklearn.metrics import accuracy_score
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA


# MNIST dataset loader supporting both gzip-compressed (.gz) and raw IDX
# (ubyte) files — matching the uncompressed fallback used when resolving paths.
class MNISTDataset(Dataset):
    """MNIST images/labels read from IDX files (optionally gzip-compressed).

    Images are flattened to a (num, rows*cols) float32 tensor scaled to
    [0, 1]; labels are an int64 tensor of the same length.
    """

    @staticmethod
    def _open(path):
        # Pick the opener by extension so uncompressed fallback files
        # (resolved without the .gz suffix) can be read as well.
        return gzip.open(path, 'rb') if path.endswith('.gz') else open(path, 'rb')

    def __init__(self, images_path, labels_path):
        # Load image data: 16-byte big-endian IDX header, then raw pixels.
        with self._open(images_path) as f:
            magic, num, rows, cols = struct.unpack('>IIII', f.read(16))
            self.images = np.frombuffer(f.read(), dtype=np.uint8)

            # Validate payload size against the header counts.
            expected_size = num * rows * cols
            if len(self.images) != expected_size:
                raise ValueError(f"Expected {expected_size} bytes, got {len(self.images)}")

            self.images = self.images.reshape(num, rows * cols)

        # Load label data: 8-byte header, then one byte per label.
        with self._open(labels_path) as f:
            magic, num = struct.unpack('>II', f.read(8))
            self.labels = np.frombuffer(f.read(), dtype=np.uint8)

            # Label count must match the header (and hence the image count).
            if len(self.labels) != num:
                raise ValueError(f"Number of labels ({len(self.labels)}) doesn't match number of images ({num})")

        # Convert to PyTorch tensors; normalize pixel values to [0, 1].
        self.images = torch.tensor(self.images, dtype=torch.float32) / 255.0
        self.labels = torch.tensor(self.labels, dtype=torch.long)

    def __len__(self):
        return len(self.labels)

    def __getitem__(self, idx):
        return self.images[idx], self.labels[idx]


# Locate the four MNIST data files. Each file is resolved independently:
# the .gz name is preferred, with a fallback to the uncompressed name.
# (The previous logic only fixed the first missing file and then stopped,
# leaving the other paths unvalidated and unable to mix compressed and
# uncompressed files.)
data_dir = './MNIST/'


def _resolve_mnist_path(filename):
    """Return an existing path for *filename* inside ``data_dir``.

    Tries the gzip-compressed name first, then the same name without its
    .gz suffix. Raises FileNotFoundError (listing the files that ARE
    present) when neither exists.
    """
    gz_path = os.path.join(data_dir, filename)
    if os.path.exists(gz_path):
        return gz_path
    plain_path = gz_path[:-3]  # strip the '.gz' suffix
    if os.path.exists(plain_path):
        return plain_path
    available_files = [f for f in os.listdir(data_dir) if f.endswith(('.gz', '.ubyte'))]
    raise FileNotFoundError(
        f"MNIST data file not found: {gz_path}\n"
        f"Available files in {data_dir}: {available_files}\n"
        "请确认:\n"
        "1. 文件是否在指定目录\n"
        "2. 文件名是否正确\n"
        "3. 文件扩展名是.gz还是.ubyte"
    )


train_images_path = _resolve_mnist_path('train-images-idx3-ubyte.gz')
train_labels_path = _resolve_mnist_path('train-labels-idx1-ubyte.gz')
test_images_path = _resolve_mnist_path('t10k-images-idx3-ubyte.gz')
test_labels_path = _resolve_mnist_path('t10k-labels-idx1-ubyte.gz')

try:
    # Build the train/test datasets from the resolved file paths.
    train_dataset = MNISTDataset(train_images_path, train_labels_path)
    test_dataset = MNISTDataset(test_images_path, test_labels_path)

    print("MNIST数据集加载成功!")
    print(f"训练集大小: {len(train_dataset)}")
    print(f"测试集大小: {len(test_dataset)}")

except Exception as e:
    # Top-level boundary: report the failure, then abort with a nonzero
    # exit status. The previous exit() reported success (status 0) to the
    # caller despite the error, and relies on the optional `site` builtin.
    print(f"加载MNIST数据集时出错: {str(e)}")
    print("可能的原因:")
    print("1. 文件损坏或不完整")
    print("2. 文件格式不是标准的MNIST二进制格式")
    print("3. 文件路径或名称不正确")
    print(f"当前使用的路径:\n"
          f"训练图像: {train_images_path}\n"
          f"训练标签: {train_labels_path}\n"
          f"测试图像: {test_images_path}\n"
          f"测试标签: {test_labels_path}")
    raise SystemExit(1)

# Batched loaders: shuffle the training stream each epoch, keep the
# evaluation order fixed.
train_loader = DataLoader(dataset=train_dataset, batch_size=64, shuffle=True)
test_loader = DataLoader(dataset=test_dataset, batch_size=64, shuffle=False)


# 1. Perceptron implementation
class Perceptron(nn.Module):
    """Single-layer linear classifier (multi-class perceptron)."""

    def __init__(self, input_dim, output_dim):
        super().__init__()
        self.linear = nn.Linear(input_dim, output_dim)

    def forward(self, x):
        """Return raw class scores (logits) for a batch of flattened inputs."""
        return self.linear(x)


# Initialize the model: 28x28 images flattened to 784 inputs, 10 digit classes.
input_dim = 28 * 28
output_dim = 10
perceptron = Perceptron(input_dim, output_dim)

# Training configuration: softmax cross-entropy loss with plain SGD.
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(perceptron.parameters(), lr=0.01)
epochs = 10

# Training loop: one full pass over the training loader per epoch.
for epoch in range(epochs):
    running_loss = 0.0
    # enumerate() was dropped — the batch index was never used.
    for images, labels in train_loader:
        optimizer.zero_grad()
        outputs = perceptron(images)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()

    # Report the mean per-batch loss for this epoch.
    print(f'Epoch [{epoch + 1}/{epochs}], Loss: {running_loss / len(train_loader):.4f}')

# Evaluate the perceptron on the held-out test set.
perceptron.eval()
correct = 0
total = 0
with torch.no_grad():
    for images, labels in test_loader:
        # argmax over the class dimension picks the predicted digit.
        predicted = perceptron(images).argmax(dim=1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()

print(f'Perceptron Test Accuracy: {100 * correct / total:.2f}%')

# 2. LDA baseline on the same train/test split, using raw numpy arrays.
X_train = train_dataset.images.numpy()
y_train = train_dataset.labels.numpy()
X_test = test_dataset.images.numpy()
y_test = test_dataset.labels.numpy()

# Fit on the training set, then score predictions on the test set
# (sklearn's fit() returns the estimator, so the calls can be chained).
lda = LDA()
y_pred = lda.fit(X_train, y_train).predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
print(f'LDA Test Accuracy: {accuracy:.4f}')

# Final side-by-side comparison of the two classifiers (both as percentages).
print("\n模型性能比较:")
print(f"感知机准确率: {100 * correct / total:.2f}%")
print(f"LDA准确率: {100 * accuracy:.2f}%")