import torch
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
import clip
import numpy as np
from torch.optim import lr_scheduler
import torch.optim as optim
import torch.nn as nn
from torch.utils.data import Dataset
import os
import numpy


# standardize caption text
def generate_image_titles(labels, classes):
    """Build a CLIP-style caption ("a photo of a <class>") for every label."""
    captions = []
    for label in labels:
        captions.append(f"a photo of a {classes[label]}")
    return captions


# dataset
class ImageTitleDataset(Dataset):
    """Pairs each image of a classification dataset with a tokenized CLIP caption.

    Captions ("a photo of a <class>") are generated and tokenized once in
    __init__, so __getitem__ only does an index lookup plus the image transform.
    """

    def __init__(self, dataset, classes, transform=None):
        # dataset must expose `.targets` (torchvision-style, e.g. CIFAR10)
        self.dataset = dataset
        self.classes = classes
        self.transform = transform
        self.titles = generate_image_titles(dataset.targets, classes)
        # tokenize all captions up front; one token tensor per sample
        self.titles_tokenized = clip.tokenize(self.titles)

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, idx):
        image, label = self.dataset[idx]
        title = self.titles_tokenized[idx]
        # transform is optional (defaults to None); the original
        # crashed with TypeError when no transform was supplied
        if self.transform is not None:
            image = self.transform(image)
        return image, title


# loss function
class MACL(object):
    """Model-Adaptive Contrastive Loss with a temperature that tracks batch alignment.

    The temperature is tau_init * (1 + alpha * (A - A0)), where A is the mean
    of the diagonal (positive-pair) logits of the current batch, detached from
    the graph. Hard positives (small P) get up-weighted by W = 1 / (1 - P).

    Args:
        tau_init: base temperature.
        alpha: sensitivity of the temperature to the alignment gap (A - A0).
        A0: alignment reference point.
        eps: numerical floor; keeps tau positive and P strictly inside (0, 1).
    """

    def __init__(self, tau_init=0.1, alpha=0.5, A0=0, eps=1e-8):
        super(MACL, self).__init__()
        self.tau_init = tau_init
        self.alpha = alpha
        self.A0 = A0
        self.eps = eps

    def __call__(self, fx, gx):
        # fx, gx: (batch, dim) embeddings; presumably L2-normalized — TODO confirm
        logits = fx @ gx.T
        # mean positive-pair similarity of this batch (no gradient through tau)
        A = torch.mean(logits.diag()).detach()
        tau = self.tau_init * (1. + self.alpha * (A - self.A0))
        # tau <= 0 (possible when A << A0) would invert/break the softmax
        tau = torch.clamp(tau, min=self.eps)

        P = torch.softmax(logits / tau, dim=1).diag()
        # keep P strictly in (0, 1): avoids 1/(1-P) div-by-zero and log(0)
        P = torch.clamp(P, min=self.eps, max=1. - self.eps)
        W = 1. / (1 - P)
        # weights are detached: they scale the loss but carry no gradient
        loss = -W.detach() * torch.log(P)
        return loss.mean()


def train_epoch(model, dataloader, optimizer, criterion_img, criterion_txt, epoch, device=None):
    """Run one training epoch of symmetric contrastive CLIP fine-tuning.

    Each batch is scored in both directions (image->text and text->image)
    against in-batch positives; the two losses are averaged.

    Args:
        model: CLIP-style model returning (logits_per_image, logits_per_text).
        dataloader: yields (images, tokenized_titles) batches.
        optimizer: optimizer over model parameters.
        criterion_img / criterion_txt: losses taking (logits, target_indices).
        epoch: epoch index, used for logging only.
        device: target device; defaults to the model's own device (the
            original read an undefined module-level global here).

    Returns:
        Mean of the per-sample image and text losses for the epoch.
    """
    if device is None:
        device = next(model.parameters()).device
    model.train()
    total_img_loss = 0.0
    total_txt_loss = 0.0

    for images, titles in dataloader:
        optimizer.zero_grad()

        image_inputs = images.to(device)
        text_inputs = titles.to(device)

        logits_per_image, logits_per_text = model(image_inputs, text_inputs)
        # contrastive targets: the i-th image matches the i-th title in the batch
        ground_truth = torch.arange(len(images), dtype=torch.long, device=device)

        img_loss = criterion_img(logits_per_image, ground_truth)
        txt_loss = criterion_txt(logits_per_text, ground_truth)

        loss = (img_loss + txt_loss) / 2
        loss.backward()
        optimizer.step()

        # weight by batch size so the epoch mean is exact for a ragged last batch
        total_img_loss += img_loss.item() * titles.size(0)
        total_txt_loss += txt_loss.item() * titles.size(0)

    epoch_img_loss = total_img_loss / len(dataloader.dataset)
    epoch_txt_loss = total_txt_loss / len(dataloader.dataset)
    epoch_loss = (epoch_txt_loss + epoch_img_loss) / 2

    print(f'Epoch {epoch}, Train Loss: {epoch_loss:.4f}, Train Loss (Image): {epoch_img_loss:.4f}, Train Loss (Text): {epoch_txt_loss:.4f}')
    return epoch_loss


# evaluation loop
def evaluate_epoch(model, dataloader, criterion_img, criterion_txt, device=None):
    """Evaluate the model on a held-out set; mirrors train_epoch without updates.

    Args:
        model: CLIP-style model returning (logits_per_image, logits_per_text).
        dataloader: yields (images, tokenized_titles) batches.
        criterion_img / criterion_txt: losses taking (logits, target_indices).
        device: target device; defaults to the model's own device (the
            original read an undefined module-level global here).

    Returns:
        Mean of the per-sample image and text losses.
    """
    if device is None:
        device = next(model.parameters()).device
    model.eval()
    total_img_loss = 0.0
    total_txt_loss = 0.0

    with torch.no_grad():
        for images, titles in dataloader:
            image_inputs = images.to(device)
            text_inputs = titles.to(device)

            logits_per_image, logits_per_text = model(image_inputs, text_inputs)
            # contrastive targets: i-th image matches i-th title in the batch
            ground_truth = torch.arange(len(images), dtype=torch.long, device=device)

            img_loss = criterion_img(logits_per_image, ground_truth)
            txt_loss = criterion_txt(logits_per_text, ground_truth)

            # weight by batch size so the mean is exact for a ragged last batch
            total_img_loss += img_loss.item() * titles.size(0)
            total_txt_loss += txt_loss.item() * titles.size(0)

    epoch_img_loss = total_img_loss / len(dataloader.dataset)
    epoch_txt_loss = total_txt_loss / len(dataloader.dataset)
    epoch_loss = (epoch_txt_loss + epoch_img_loss) / 2

    # fixed: the original printed "Train Loss" for the test-set aggregate
    print(f'Test Loss: {epoch_loss:.4f}, Test Loss (Image): {epoch_img_loss:.4f}, Test Loss (Text): {epoch_txt_loss:.4f}')

    return epoch_loss


def save_model(epoch, model, optimizer, loss, folder="folder"):
    """Persist a training checkpoint (model + optimizer state) to disk.

    Args:
        epoch: epoch index, embedded in the filename.
        model: module whose state_dict is saved.
        optimizer: optimizer whose state_dict is saved.
        loss: average test loss stored alongside the weights.
        folder: destination directory (created if missing; the original
            crashed with FileNotFoundError when it did not exist).

    Returns:
        Path of the written checkpoint file.
    """
    os.makedirs(folder, exist_ok=True)
    model_path = os.path.join(folder, f"clip_model_checkpoint_{epoch}.pt")
    torch.save({
        'epoch': epoch,
        'model_state_dict': model.state_dict(),
        'optimizer_state_dict': optimizer.state_dict(),
        'average_test_loss': loss,
    }, model_path)
    return model_path


if __name__ == '__main__':
    EPOCHS = 20
    BATCH_SIZE = 64

    # Prefer the first GPU; CLIP fine-tuning is impractically slow on CPU.
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # jit=False loads the full PyTorch module (required for fine-tuning;
    # the JIT-scripted variant is inference-only).
    model, preprocess = clip.load('ViT-B/32', device=device, jit=False)
    model = model.to(device)

    # CIFAR-10 images get paired with tokenized "a photo of a <class>" captions;
    # `preprocess` is CLIP's own resize/normalize pipeline.
    train = datasets.CIFAR10(root='./data', download=True, train=True)
    test = datasets.CIFAR10(root='./data', download=True, train=False)
    train_dataset = ImageTitleDataset(train, train.classes, transform=preprocess)
    test_dataset = ImageTitleDataset(test, test.classes, transform=preprocess)
    train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=4)
    test_loader = DataLoader(test_dataset, batch_size=BATCH_SIZE, shuffle=False, num_workers=4)

    # Standard symmetric CLIP objective: cross-entropy over in-batch pairs
    # in both directions. MACL (below) is the alternative adaptive loss.
    criterion_img = nn.CrossEntropyLoss()
    criterion_txt = nn.CrossEntropyLoss()
    # criterion_img = MACL()
    # criterion_txt = MACL()
    # Small LR / large weight decay follow common CLIP fine-tuning practice;
    # NOTE(review): eps=5e-5 is unusually large for Adam — confirm intentional.
    optimizer = optim.Adam(model.parameters(), lr=1e-6, betas=(0.9, 0.98), eps=5e-5, weight_decay=0.1)
    # Decay the LR by 10x every 10 epochs.
    scheduler = lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)

    for epoch in range(EPOCHS):
        train_epoch(model, train_loader, optimizer, criterion_img, criterion_txt, epoch)
        loss = evaluate_epoch(model, test_loader, criterion_img, criterion_txt)
        # save_model(epoch, model, optimizer, loss)
        scheduler.step()
