import torch
import torch.nn as nn
from torchvision.datasets import CIFAR10
from transformers import BertModel, BertTokenizer
from torchvision.transforms import transforms
from torch.utils.data import DataLoader
import timm
import numpy as np

class ViT(nn.Module):
    """Image encoder: a pretrained ViT-Small whose classification head is
    re-sized to project into an `output_dim`-dimensional feature space."""

    def __init__(self, output_dim):
        super().__init__()
        # timm supplies the pretrained backbone; num_classes resizes the head.
        self.vit = timm.create_model(
            'vit_small_patch16_224', pretrained=True, num_classes=output_dim
        )

    def forward(self, x):
        # x: image batch — presumably (B, 3, 224, 224) per the 224 model name.
        features = self.vit(x)
        return features

class TextEncoder(nn.Module):
    """Text encoder: a locally stored BERT plus its tokenizer.

    forward() maps a list of strings to their [CLS]-token embeddings.
    """

    def __init__(self):
        super().__init__()
        bert_dir = './bert-base-uncased'
        self.model = BertModel.from_pretrained(bert_dir)
        self.tokenizer = BertTokenizer.from_pretrained(bert_dir)

    def forward(self, texts):
        # Pad/truncate so variable-length texts batch into one tensor.
        tokens = self.tokenizer(
            texts, return_tensors='pt', padding=True, truncation=True
        )
        outputs = self.model(**tokens)
        # Position 0 is the [CLS] token: one (hidden_size,) vector per text.
        cls_embeddings = outputs.last_hidden_state[:, 0, :]
        return cls_embeddings

def load_cifar10_dataset():
    """Return a shuffled CIFAR-10 training DataLoader (batch size 4) and
    the list of class-name strings."""
    preprocess = transforms.Compose([
        transforms.Resize((224, 224)),  # match the ViT input resolution
        transforms.ToTensor(),
    ])
    dataset = CIFAR10(root='./data', train=True, download=True, transform=preprocess)
    batches = DataLoader(dataset, batch_size=4, shuffle=True)
    return batches, dataset.classes

class CLIP(nn.Module):
    """Minimal CLIP: projects image and text features into a shared embedding
    space and returns the batch's pairwise image-text similarity matrix."""

    def __init__(self, image_output_dim, text_output_dim):
        super().__init__()
        self.image_encoder = ViT(image_output_dim)
        self.text_encoder = TextEncoder()

        # Learned linear projections into the joint space.
        # 768 is the width of the text encoder's output (BERT hidden state).
        self.W_1 = nn.Parameter(torch.randn(image_output_dim, text_output_dim))
        self.W_t = nn.Parameter(torch.randn(768, text_output_dim))

    def forward(self, images, texts):
        image_features = self.image_encoder(images)  # (B, image_output_dim)
        text_features = self.text_encoder(texts)     # (B, 768)

        # Project both modalities into the shared (B, text_output_dim) space.
        image_embed = torch.matmul(image_features, self.W_1)
        text_embed = torch.matmul(text_features, self.W_t)

        # Similarity of every image against every text in the batch: (B, B).
        similarity = torch.matmul(image_embed, text_embed.T)
        return similarity

def main():
    """Iterate CIFAR-10 batches and print the symmetric CLIP contrastive loss.

    Each batch's class labels are mapped to their class-name strings and fed
    to the text encoder; the target for both loss directions is the diagonal
    (the i-th image matches the i-th text).

    NOTE(review): this loop only evaluates the loss — there is no optimizer,
    no loss.backward(), and no parameter update. Confirm whether training was
    intended.
    """
    loader, classes = load_cifar10_dataset()

    clip_model = CLIP(image_output_dim=512, text_output_dim=512)

    # Hoisted out of the loop: previously a new CrossEntropyLoss was
    # constructed twice per batch.
    criterion = nn.CrossEntropyLoss()

    for images, labels in loader:
        texts = [classes[label] for label in labels]
        logits = clip_model(images, texts)  # (B, B)

        # Bug fix: `logits.shape(0)` raised TypeError — torch.Size is not
        # callable. Use .size(0) to get the batch dimension.
        targets = torch.arange(logits.size(0))

        loss_i = criterion(logits, targets)    # image -> text direction
        loss_t = criterion(logits.T, targets)  # text -> image direction
        loss = (loss_i + loss_t) / 2

        print(f"loss is {loss}")


# Bug fix: main() was never invoked, so running the script did nothing.
if __name__ == "__main__":
    main()