import os
from PIL import Image
import numpy as np
import clip
from loguru import logger
from torch.utils.data import Dataset, DataLoader, ConcatDataset
import torch.optim as optim
from torch.optim import lr_scheduler
import torch.nn as nn
import torch
import torchvision
from torchvision import transforms


# Mapping from ImageFolder class index to the natural-language prompt that
# CLIP will match against each image.
# NOTE(review): "two person" is ungrammatical — "two people" would likely be a
# better CLIP prompt, but changing it changes the tokenized targets; confirm
# before editing.
idx_to_class = {
    0: "a photo without person",
    1: "a photo of one person",
    2: "a photo of two person",
}


def retain_class_name(target):
    """Convert a class index into a 1-D tensor of CLIP text tokens."""
    prompt = idx_to_class[target]
    tokens = clip.tokenize(prompt)      # shape (1, context_length)
    return tokens.squeeze(dim=0)        # drop batch dim -> (context_length,)


if __name__ == "__main__":
    # Select GPU if available, otherwise CPU.
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # jit=False returns a regular nn.Module that can be fine-tuned.
    net, preprocess = clip.load("ViT-B/32", device=device, jit=False)
    # clip.load yields fp16 weights on CUDA; running Adam directly on fp16
    # parameters corrupts training. Keep the master weights in fp32 and let
    # autocast (below) provide mixed precision for the forward pass.
    net = net.float()

    # Adam hyperparameters follow the CLIP paper's fine-tuning settings.
    optimizer = optim.Adam(net.parameters(), lr=1e-6, betas=(0.9, 0.98),
                           eps=1e-6, weight_decay=0.001)
    scheduler = lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)

    # Symmetric contrastive loss: image->text and text->image directions.
    loss_img = nn.CrossEntropyLoss()
    loss_txt = nn.CrossEntropyLoss()

    # Dataset layout: <root>/train/<class_name>/*.jpg. ImageFolder maps each
    # sub-folder to a class index; target_transform turns that index into a
    # tokenized prompt so each batch is aligned (image_i, text_i) pairs.
    root = r'/media/lihongsen/home/luoluoluo/dataset/clip_test'
    train_data = torchvision.datasets.ImageFolder(
        os.path.join(root, "train"),
        transform=preprocess,
        target_transform=retain_class_name)
    train_iter = DataLoader(train_data, batch_size=8, shuffle=True,
                            num_workers=0)

    phase = "train"
    model_name = "clip"
    ckt_gap = 10
    epoches = 100
    logger.add("out.log")

    # Mixed precision only makes sense on CUDA; GradScaler prevents fp16
    # gradient underflow (the original used autocast with no scaler).
    use_amp = device.type == "cuda"
    scaler = torch.cuda.amp.GradScaler(enabled=use_amp)

    for epoch in range(epoches):
        total_loss = 0.0
        batch_num = 0
        for images, label_tokens in train_iter:
            images = images.to(device)
            label_tokens = label_tokens.to(device)
            batch_num += 1
            optimizer.zero_grad()
            with torch.set_grad_enabled(phase == "train"):
                # autocast wraps only the forward pass; backward must run
                # outside it (the original wrapped the whole epoch).
                with torch.cuda.amp.autocast(enabled=use_amp):
                    logits_per_image, logits_per_text = net(images, label_tokens)
                    # The matching text for image i is row i, so the targets
                    # are simply 0..batch_size-1 in both directions.
                    ground_truth = torch.arange(len(images), dtype=torch.long,
                                                device=device)
                    cur_loss = (loss_img(logits_per_image, ground_truth)
                                + loss_txt(logits_per_text, ground_truth)) / 2
                # .item() detaches from the graph; accumulating the tensor
                # itself would keep every batch's graph alive (memory leak).
                total_loss += cur_loss.item()
                if phase == "train":
                    scaler.scale(cur_loss).backward()
                    scaler.step(optimizer)
                    scaler.update()
            if batch_num % 4 == 0:
                logger.info('{} epoch:{} loss:{}'.format(
                    phase, epoch, cur_loss.item()))
        # Step the LR schedule once per epoch, AFTER optimizer.step() —
        # calling it first (as the original did) skips the initial LR stage
        # and triggers a PyTorch ordering warning.
        scheduler.step()
        if batch_num:
            logger.info('{} epoch:{} avg_loss:{}'.format(
                phase, epoch, total_loss / batch_num))
        if epoch % ckt_gap == 0:
            torch.save(net.state_dict(), f"{model_name}_epoch_{epoch}.pth")
            logger.info(f"{model_name}_epoch_{epoch} saved")
