import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torchvision.models import resnet50
from torch.utils.data import DataLoader
from torchvision.datasets import CIFAR10
from torchvision.transforms import transforms
from typing import Tuple, Optional
import numpy as np


class MoCo(nn.Module):
    """Momentum Contrast (MoCo) for self-supervised representation learning.

    Holds a query encoder trained by backpropagation, a key encoder updated
    as an exponential moving average (EMA) of the query encoder, and a
    fixed-size queue of negative keys used by the InfoNCE contrastive loss.
    """

    def __init__(self, feature_dim: int = 1024, queue_size: int = 4096,
                 momentum: float = 0.999, temperature: float = 0.07,
                 device: Optional[torch.device] = None):
        """
        Args:
            feature_dim: dimensionality of the encoder output features.
            queue_size: number of negative keys kept in the queue; must be
                an integer multiple of the training batch size.
            momentum: EMA coefficient for the key-encoder update.
            temperature: softmax temperature for the contrastive loss.
            device: target device; defaults to CUDA when available.
        """
        super().__init__()

        self.feature_dim = feature_dim
        self.queue_size = queue_size
        self.momentum = momentum
        self.temperature = temperature
        self.device = device if device else torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # Query and key encoders share the same architecture.
        self.encoder_q = self._get_encoder(feature_dim)
        self.encoder_k = self._get_encoder(feature_dim)

        # Start the key encoder from the query encoder's weights.
        self._init_encoder_k()

        # The key encoder is updated only via EMA, never by gradients.
        for param in self.encoder_k.parameters():
            param.requires_grad = False

        # BUG FIX: the original stored the queue as a plain attribute while
        # claiming it was a buffer — so it was silently missing from
        # state_dict() checkpoints and did not follow the module across
        # devices. register_buffer() gives both properties.
        queue = F.normalize(torch.randn(feature_dim, queue_size), dim=0)
        self.register_buffer("queue", queue.to(self.device))
        self.register_buffer("queue_ptr",
                             torch.zeros(1, dtype=torch.long, device=self.device))

    def _get_encoder(self, output_dim: int) -> nn.Module:
        """Build a ResNet-50 backbone whose head projects to `output_dim`."""
        # NOTE(review): `pretrained=` is deprecated (later removed) in newer
        # torchvision; switch to `weights=None` once the minimum supported
        # torchvision version allows it.
        model = resnet50(pretrained=False)
        model.fc = nn.Linear(model.fc.in_features, output_dim)
        return model.to(self.device)

    def _init_encoder_k(self):
        """Copy the query encoder's weights into the key encoder."""
        self.encoder_k.load_state_dict(self.encoder_q.state_dict())
        self.encoder_k.to(self.device)

    @torch.no_grad()
    def _momentum_update_encoder_k(self):
        """EMA update of the key encoder: k <- m * k + (1 - m) * q."""
        for param_q, param_k in zip(self.encoder_q.parameters(),
                                    self.encoder_k.parameters()):
            # In-place update avoids allocating a temporary per parameter.
            param_k.data.mul_(self.momentum).add_(param_q.data,
                                                  alpha=1.0 - self.momentum)

    @torch.no_grad()
    def _dequeue_and_enqueue(self, keys: torch.Tensor):
        """Overwrite the oldest `batch_size` queue columns with new keys.

        Args:
            keys: normalized key features, shape [batch_size, feature_dim].

        Raises:
            ValueError: if queue_size is not a multiple of the batch size
                (the pointer arithmetic assumes whole-batch slots).
        """
        batch_size = keys.shape[0]

        # An explicit exception instead of `assert`, which is stripped
        # when Python runs with -O.
        if self.queue_size % batch_size != 0:
            raise ValueError("队列大小应该是批量大小的整数倍")

        ptr = int(self.queue_ptr)
        self.queue[:, ptr:ptr + batch_size] = keys.T
        # Wrap around so the queue behaves as a ring buffer.
        self.queue_ptr[0] = (ptr + batch_size) % self.queue_size

    def forward(self, im_q: torch.Tensor, im_k: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """Compute the contrastive loss for one batch of two views.

        Args:
            im_q: query images, [batch_size, 3, H, W].
            im_k: key images, [batch_size, 3, H, W].
        Returns:
            loss: InfoNCE contrastive loss (scalar tensor).
            logits: unscaled similarity scores, [batch_size, 1 + queue_size].
        """
        q = self.encoder_q(im_q)
        q = F.normalize(q, dim=1)

        # Keys are produced without gradients; the key encoder is advanced
        # by EMA just before encoding, as in the MoCo algorithm.
        with torch.no_grad():
            self._momentum_update_encoder_k()
            k = self.encoder_k(im_k)
            k = F.normalize(k, dim=1)

        loss, logits = self.contrastive_loss(q, k)

        # Enqueue AFTER the loss so the current keys are not used as their
        # own negatives.
        self._dequeue_and_enqueue(k)

        return loss, logits

    def contrastive_loss(self, q: torch.Tensor, k: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """InfoNCE loss of queries against their keys plus queued negatives.

        Args:
            q: normalized query features, [batch_size, feature_dim].
            k: normalized key features, [batch_size, feature_dim].
        Returns:
            loss: cross-entropy over temperature-scaled logits.
            logits: raw (unscaled) similarities, [batch_size, 1 + queue_size].
        """
        batch_size = q.shape[0]

        # Positive logits: per-sample dot product q_i . k_i -> [B, 1].
        pos_sim = torch.einsum("nc,nc->n", q, k).unsqueeze(-1)

        # Negative logits: similarities against every queued key -> [B, K].
        neg_sim = torch.mm(q, self.queue)

        # Column 0 holds the positive; the label for every row is 0.
        logits = torch.cat([pos_sim, neg_sim], dim=1)
        labels = torch.zeros(batch_size, dtype=torch.long, device=self.device)

        # Temperature is applied for the loss only; raw logits are returned,
        # matching the original interface.
        loss = F.cross_entropy(logits / self.temperature, labels)

        return loss, logits


def create_transforms():
    """Build the MoCo-style augmentation pipeline for 32x32 CIFAR images."""
    # Color jitter is applied stochastically, as in the original pipeline.
    random_color_jitter = transforms.RandomApply(
        [transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)], p=0.8
    )
    augmentations = [
        transforms.RandomResizedCrop(32, scale=(0.2, 1.0)),
        transforms.RandomHorizontalFlip(p=0.5),
        random_color_jitter,
        transforms.RandomGrayscale(p=0.2),
        transforms.ToTensor(),
        # CIFAR-10 per-channel mean/std normalization.
        transforms.Normalize(mean=[0.4914, 0.4822, 0.4465],
                             std=[0.2023, 0.1994, 0.2010]),
    ]
    return transforms.Compose(augmentations)


class _TwoCropsTransform:
    """Apply the same augmentation pipeline twice to one image.

    Produces the (query, key) pair of views that MoCo needs. Defined at
    module level so DataLoader workers can pickle it.
    """

    def __init__(self, base_transform):
        self.base_transform = base_transform

    def __call__(self, img):
        # Two independent random draws of the same augmentation pipeline.
        return self.base_transform(img), self.base_transform(img)


def main():
    """Train MoCo on CIFAR-10 and periodically checkpoint the model."""
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"使用设备: {device}")

    # BUG FIX: the original applied `transform` a second time inside the
    # training loop to tensors that the dataset had already transformed;
    # ToTensor raises on tensor input, so training crashed on batch 0.
    # Instead, both augmented views are produced once, in the dataset
    # transform itself.
    transform = _TwoCropsTransform(create_transforms())
    dataset = CIFAR10(root="./data", train=True, transform=transform, download=True)
    # BUG FIX: drop_last=True — CIFAR-10 has 50000 samples, so the final
    # batch would be 80 images and violate MoCo's requirement that
    # queue_size (4096) be a multiple of the batch size.
    loader = DataLoader(dataset, batch_size=256, shuffle=True, num_workers=4,
                        pin_memory=True, drop_last=True)

    moco = MoCo(feature_dim=1024, queue_size=4096, device=device)
    # encoder_k params have requires_grad=False, so only the query encoder
    # actually receives updates.
    optimizer = optim.Adam(moco.parameters(), lr=1e-3)

    moco.train()
    num_epochs = 100
    print_interval = 50

    for epoch in range(num_epochs):
        total_loss = 0.0
        num_batches = 0

        # Each sample is ((im_q, im_k), label); labels are unused.
        for batch_idx, ((im_q, im_k), _) in enumerate(loader):
            im_q = im_q.to(device, non_blocking=True)
            im_k = im_k.to(device, non_blocking=True)

            optimizer.zero_grad()
            loss, _ = moco(im_q, im_k)
            loss.backward()
            optimizer.step()

            total_loss += loss.item()
            num_batches += 1

            if batch_idx % print_interval == 0:
                avg_loss = total_loss / num_batches
                print(f"Epoch [{epoch + 1}/{num_epochs}], Batch [{batch_idx}/{len(loader)}], "
                      f"Loss: {avg_loss:.4f}")

        avg_epoch_loss = total_loss / len(loader)
        print(f"Epoch [{epoch + 1}/{num_epochs}] completed, Average Loss: {avg_epoch_loss:.4f}")

        # Periodic checkpoint so long runs can be resumed.
        if (epoch + 1) % 10 == 0:
            checkpoint = {
                'epoch': epoch,
                'model_state_dict': moco.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
                'loss': avg_epoch_loss,
            }
            torch.save(checkpoint, f'moco_checkpoint_epoch_{epoch + 1}.pth')


if __name__ == "__main__":
    main()