import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torchvision.models import resnet50
from torch.utils.data import DataLoader
from torchvision.datasets import CIFAR10
from torchvision.transforms import transforms
from typing import Tuple, Optional
import numpy as np
import timm


class ViT(nn.Module):
    """Thin wrapper around a timm ViT-Small/16 backbone with a projection head.

    Args:
        output_dim: Dimension of the output embedding (``num_classes`` of the
            timm model, i.e. the final linear head size).
    """

    def __init__(self, output_dim):
        # BUG FIX: this method was misspelled ``__int__``, so it was never
        # invoked — ``nn.Module.__init__`` never ran and ``self.vit`` did not
        # exist, crashing on the first forward call.
        super(ViT, self).__init__()
        self.vit = timm.create_model("vit_small_patch16_224", pretrained=False, num_classes=output_dim)

    def forward(self, x):
        # NOTE(review): the backbone expects 224x224 input — the data
        # pipeline must resize images accordingly.
        return self.vit(x)

def create_transforms():
    """Build the training augmentation pipeline for CIFAR-10.

    Returns:
        A ``transforms.Compose`` that yields normalized 224x224 float
        tensors, matching the 224x224 input size required by the
        ``vit_small_patch16_224`` backbone used in this file.
    """
    train_transform = transforms.Compose([
        # BUG FIX: was RandomResizedCrop(32); the ViT-Small/16-224 backbone
        # requires 224x224 inputs, so 32x32 tensors crashed the forward pass.
        transforms.RandomResizedCrop(224, scale=(0.2, 1.0)),
        transforms.RandomHorizontalFlip(p=0.5),
        transforms.RandomApply([
            transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)
        ], p=0.8),
        transforms.RandomGrayscale(p=0.2),
        transforms.ToTensor(),
        # Standard CIFAR-10 per-channel statistics.
        transforms.Normalize(mean=[0.4914, 0.4822, 0.4465],
                             std=[0.2023, 0.1994, 0.2010])
    ])
    return train_transform

def augment(x):
    """Return a stochastically perturbed view of ``x``.

    Adds element-wise Gaussian noise with standard deviation 0.1; the input
    tensor itself is left unmodified.
    """
    noise = torch.randn_like(x)
    return x + noise * 0.1

def _loss(t, s, C, tps, tpt):
    t = t.detach()
    s = F.softmax(s / tps, dim=1)
    t = F.softmax((t-C)/tpt, dim=1)
    return -(t * torch.log(s)).sum(dim=1).mean()

def main():
    """Run a DINO-style self-distillation training loop on CIFAR-10.

    A student ViT is trained by gradient descent; a teacher ViT is updated
    only as an exponential moving average (EMA) of the student, and its
    (centered, sharpened) outputs serve as soft targets for the student.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"使用设备: {device}")

    transform = create_transforms()
    dataset = CIFAR10(root="./data", train=True, transform=transform, download=True)
    loader = DataLoader(dataset, batch_size=256, shuffle=True, num_workers=4, pin_memory=True)

    output_dim = 128
    g_student = ViT(output_dim).to(device)
    g_teacher = ViT(output_dim).to(device)
    g_teacher.load_state_dict(g_student.state_dict())
    # The teacher is updated only via EMA, never by the optimizer.
    for p in g_teacher.parameters():
        p.requires_grad_(False)

    c = torch.zeros(output_dim, device=device)  # running center of teacher outputs
    student_temperature = 0.1
    teacher_temperature = 0.07
    l = 0.6  # EMA momentum for the teacher weights
    m = 0.6  # momentum for the center update

    optimizer = optim.SGD(g_student.parameters(), lr=1e-3, momentum=0.9, weight_decay=5e-4)

    # Training: one pass over the loader, two augmented views per batch.
    for x, _ in loader:
        x = x.to(device)
        x1, x2 = augment(x), augment(x)

        s1, s2 = g_student(x1), g_student(x2)
        # Teacher forwards need no autograd graph — they only supply targets.
        with torch.no_grad():
            t1, t2 = g_teacher(x1), g_teacher(x2)

        # BUG FIX: _loss(t, s, ...) expects the TEACHER output first. The
        # original passed the student output as `t`, which detached the
        # student branch, so no gradient ever reached the student network.
        loss = (_loss(t2, s1, c, student_temperature, teacher_temperature)
                + _loss(t1, s2, c, student_temperature, teacher_temperature)) / 2

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        with torch.no_grad():
            # BUG FIX: the original zipped student params into `t_param` and
            # assigned into them, overwriting the STUDENT with a blend of the
            # teacher. The EMA must update the teacher from the student.
            for t_param, s_param in zip(g_teacher.parameters(),
                                        g_student.parameters()):
                t_param.data = t_param.data * l + \
                               s_param.data * (1.0 - l)

            # Update the center with the batch mean of both teacher views.
            c = m * c + (1 - m) * torch.cat([t1, t2]).mean(dim=0)
