import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torchvision import datasets, transforms

# 超参数配置（关键修改点1）
# Hyperparameter configuration (key modification point 1)
config = {
    "image_size": 28,    # MNIST images are 28x28
    "patch_size": 7,     # 28/7 = 4 patches per side -> 16 patches per image
    "num_classes": 10,   # digits 0-9
    "dim": 64,           # transformer embedding dimension
    "depth": 6,          # number of transformer encoder layers
    "heads": 4,          # attention heads (must divide dim)
    "mlp_dim": 128,      # feed-forward hidden size inside each encoder layer
    "channels": 3,  # must match the channel count produced by preprocessing
    "batch_size": 64,
    "lr": 3e-4,          # Adam learning rate
    "epochs": 10,
    "device": "cuda" if torch.cuda.is_available() else "cpu"
}

# 数据预处理（保持三通道转换）
def _to_three_channels(t):
    # MNIST is single-channel; tile the channel axis to 3 so the tensor
    # matches config["channels"].
    return t.repeat(3, 1, 1)

# Preprocessing: PIL image -> float tensor in [0,1], normalize with the
# standard MNIST mean/std, then replicate the grayscale channel 3x.
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,)),
    transforms.Lambda(_to_three_channels),
])

# 加载数据集（保持不变）
# Dataset loading. download=True makes the script self-bootstrapping: the
# original download=False raises RuntimeError on a fresh checkout where
# ./data does not yet exist (torchvision skips the download when the files
# are already present, so this is a no-op otherwise).
train_dataset = datasets.MNIST(root='./data', train=True, download=True, transform=transform)
test_dataset = datasets.MNIST(root='./data', train=False, download=True, transform=transform)
train_loader = DataLoader(train_dataset, batch_size=config["batch_size"], shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=config["batch_size"], shuffle=False)

# 修正后的ViT模型
# Corrected ViT model
class ViTForMNIST(nn.Module):
    """Minimal Vision Transformer classifier.

    Splits each image into non-overlapping patches with nn.Unfold, linearly
    projects them, prepends a learnable CLS token, adds learned position
    embeddings, runs a Transformer encoder, and classifies from the CLS token.

    Expects input of shape [B, channels, image_size, image_size] and returns
    logits of shape [B, num_classes].
    """

    def __init__(self, config):
        super().__init__()

        # Dimension bookkeeping (key modification point 2):
        # number of patches, e.g. (28 // 7) ** 2 = 16
        self.num_patches = (config["image_size"] // config["patch_size"]) ** 2
        # flattened patch length, e.g. 3 * 7^2 = 147
        patch_dim = config["channels"] * (config["patch_size"] ** 2)

        # Non-overlapping patch extraction: [B, C, H, W] -> [B, patch_dim, num_patches]
        self.unfold = nn.Unfold(kernel_size=config["patch_size"], stride=config["patch_size"])
        self.linear = nn.Linear(patch_dim, config["dim"])  # patch_dim -> dim

        # Learnable CLS token, broadcast over the batch in forward()
        self.cls_token = nn.Parameter(torch.randn(1, 1, config["dim"]))

        # Learned position embeddings for CLS token + all patches
        self.pos_embedding = nn.Parameter(
            torch.randn(1, self.num_patches + 1, config["dim"])
        )

        # BUG FIX: batch_first=True is required because forward() feeds
        # [B, seq, dim]. The default seq-first layout would silently treat
        # the batch axis as the sequence axis and attend across samples.
        self.transformer = nn.TransformerEncoder(
            encoder_layer=nn.TransformerEncoderLayer(
                d_model=config["dim"],
                nhead=config["heads"],
                dim_feedforward=config["mlp_dim"],
                activation="gelu",
                batch_first=True,
            ),
            num_layers=config["depth"]
        )

        # Classification head applied to the CLS token output
        self.mlp_head = nn.Sequential(
            nn.LayerNorm(config["dim"]),
            nn.Linear(config["dim"], config["num_classes"])
        )

    def forward(self, x):
        """Return class logits [B, num_classes] for images [B, C, H, W]."""
        B = x.shape[0]
        patches = self.unfold(x)                 # [B, patch_dim, num_patches]
        patches = patches.transpose(1, 2)        # [B, num_patches, patch_dim]
        patches = self.linear(patches)           # [B, num_patches, dim]

        # Prepend CLS token
        cls_tokens = self.cls_token.expand(B, -1, -1)  # [B, 1, dim]
        x = torch.cat((cls_tokens, patches), dim=1)    # [B, num_patches + 1, dim]

        # Position embeddings (out-of-place add keeps autograd history clean)
        x = x + self.pos_embedding

        # Transformer encoder (batch-first)
        x = self.transformer(x)

        cls_out = x[:, 0]  # CLS token representation, [B, dim]

        # BUG FIX: the original returned the raw CLS embedding ([B, dim]) and
        # never applied mlp_head, so logits had dim (64) entries instead of
        # num_classes (10). Apply the classification head here.
        return self.mlp_head(cls_out)

# 初始化模型
# Build the model on the configured device, then the loss and optimizer.
device = config["device"]
model = ViTForMNIST(config).to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=config["lr"])

# 训练与测试函数（保持不变）
def train():
    """Run one training epoch over train_loader; return the mean batch loss."""
    model.train()
    total_loss = 0
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(config["device"]), target.to(config["device"])
        optimizer.zero_grad()
        output = model(data)  # [B, num_classes] logits
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()
        total_loss += loss.item()
        # BUG FIX: removed the per-sample debug loop that printed every logit
        # and target of every batch — it flooded stdout and dominated runtime.
        # Keep only the periodic progress log.
        if batch_idx % 100 == 0:
            print(f"Train Batch: {batch_idx}/{len(train_loader)} Loss: {loss.item():.4f}")
    return total_loss / len(train_loader)

def test():
    """Evaluate on test_loader; print average loss and return accuracy (%)."""
    model.eval()
    running_loss = 0
    num_correct = 0
    with torch.no_grad():
        for images, labels in test_loader:
            images = images.to(config["device"])
            labels = labels.to(config["device"])
            logits = model(images)
            running_loss += criterion(logits, labels).item()
            predictions = logits.argmax(dim=1, keepdim=True)
            num_correct += predictions.eq(labels.view_as(predictions)).sum().item()
    test_loss = running_loss / len(test_loader)
    accuracy = 100. * num_correct / len(test_loader.dataset)
    print(f"Test set: Average loss: {test_loss:.4f}, Accuracy: {accuracy:.2f}%")
    return accuracy

# 训练循环
# Main loop: one training pass followed by one evaluation per epoch.
for epoch in range(1, config["epochs"] + 1):
    epoch_loss = train()
    epoch_acc = test()
    print(f"Epoch {epoch}: Train Loss: {epoch_loss:.4f} | Test Acc: {epoch_acc:.2f}%")
