from transformers import AdamW, ViTImageProcessor, ViTForImageClassification, ViTModel  # 图像特征提取器
from PIL import Image
from datasets import load_from_disk, load_dataset
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import numpy as np
import matplotlib.pyplot as plt
from transformers.optimization import get_scheduler

# Image-to-tensor preprocessing tool: resizes/normalizes PIL images into
# model-ready pixel arrays for the ViT checkpoint named below.
feature_extractor = ViTImageProcessor.from_pretrained('google/vit-base-patch16-224')

# Load the previously saved dataset from a local directory.
dataset = load_from_disk('../data')


# Preprocessing: run the feature extractor over each batch of images and
# attach the integer class labels.
def transform(data):
    """Convert a batch dict with 'image' and 'labels' into model inputs.

    Args:
        data: batch dict from `datasets` with an 'image' list (PIL images)
            and a 'labels' list (ints).

    Returns:
        Feature-extractor output with a 'labels' key added.
    """
    # Fix: the keyword is `return_tensors` (plural). The original misspelled
    # `return_tensor`, which the processor silently ignored, so pixel values
    # came back as plain arrays instead of torch tensors.
    inputs = feature_extractor(data['image'], return_tensors='pt')
    inputs['labels'] = data['labels']
    return inputs


# Apply lazily: the transform runs on-the-fly whenever examples are accessed.
dataset = dataset.with_transform(transform)


# DataLoader collate callback: merge per-example dicts into batched tensors.
def collate_fn(data):
    """Stack a list of example dicts into a single batch dict.

    Args:
        data: list of dicts, each with 'pixel_values' (array-like image
            data) and 'labels' (int class id).

    Returns:
        dict with a float 'pixel_values' tensor (batch-first) and an
        int64 'labels' tensor.
    """
    batch_pixels = np.array([example['pixel_values'] for example in data])
    batch_labels = [example['labels'] for example in data]
    return {
        'pixel_values': torch.tensor(batch_pixels),
        'labels': torch.LongTensor(batch_labels),
    }


# Training-split DataLoader: batches of 8, reshuffled each epoch; the last
# incomplete batch is dropped so every batch has exactly 8 examples.
train_dataloader = DataLoader(
    dataset=dataset['train'],
    batch_size=8,
    shuffle=True,
    drop_last=True,
    collate_fn=collate_fn
)


# Load the model (direct single-model alternative, kept for reference):
# model = ViTForImageClassification.from_pretrained('../model/google_vit-base-patch16-224-in21k', num_labels=3)

# Downstream-task model: ViT encoder backbone plus a 3-way linear head.
class Model(nn.Module):
    """ViT backbone with a 3-class linear classification head."""

    def __init__(self):
        super(Model, self).__init__()
        # Backbone: bare ViT encoder (no classification head).
        self.pretrained = ViTModel.from_pretrained('../model/google_vit-base-patch16-224-in21k')
        # Head: 768-dim pooled output -> 3 class logits.
        self.fc = nn.Linear(768, 3)

        # Initialize the head from the checkpoint's own classifier weights
        # (the full classification model is loaded once just to copy them).
        parameters = ViTForImageClassification.from_pretrained('../model/google_vit-base-patch16-224-in21k',
                                                               num_labels=3)
        self.fc.load_state_dict(parameters.classifier.state_dict())
        self.criterion = nn.CrossEntropyLoss()

    def forward(self, pixel_values, labels=None):
        """Run a batch through the backbone and classification head.

        Args:
            pixel_values: preprocessed image batch for the ViT encoder.
            labels: optional LongTensor of class ids; when provided, the
                cross-entropy loss is computed.

        Returns:
            dict with 'logits' and 'loss' ('loss' is None when no labels
            are given — a backward-compatible inference-only mode).
        """
        # pooler_output: the [CLS]-token representation after the pooler.
        pooled = self.pretrained(pixel_values=pixel_values).pooler_output
        logits = self.fc(pooled)
        if labels is None:
            return {'loss': None, 'logits': logits}
        loss = self.criterion(logits, labels)
        return {'loss': loss, 'logits': logits}


model = Model()
# Sanity check: print the total number of model parameters.
print(sum(i.numel() for i in model.parameters()))


def show(image, out, label):
    """Display the first four images titled 'predicted / true' label names.

    Args:
        image: tensor batch of shape (N, C, H, W), N >= 4 — normalized
            pixel values (assumes N >= 4; TODO confirm at call sites).
        out: sequence of predicted label names.
        label: sequence of ground-truth label names.
    """
    plt.figure(figsize=(16, 4))
    for idx in range(4):
        plt.subplot(1, 4, idx + 1)

        # Undo normalization for display: shift to 0-min, scale into [0, 255].
        pic = image[idx].clone().permute(1, 2, 0)
        pic = pic - pic.min().item()
        pic = pic / pic.max().item() * 255

        rendered = Image.fromarray(pic.numpy().astype('uint8'), mode='RGB')
        plt.imshow(rendered.resize((500, 500)))

        plt.title('%s / %s' % (out[idx], label[idx]), y=-0.1)
        plt.axis('off')

    plt.show()


def test():
    """Evaluate the global `model` on up to 5 test batches and print accuracy."""
    model.eval()

    # Human-readable class names for the integer label ids.
    names = dataset['test'].features['labels'].names

    loader_test = torch.utils.data.DataLoader(
        dataset=dataset['test'],
        batch_size=8,
        collate_fn=collate_fn,
        shuffle=True,
        drop_last=True,
    )

    correct = 0
    total = 0
    for i, data in enumerate(loader_test):
        with torch.no_grad():
            out = model(**data)

        pred = out['logits'].argmax(dim=1)
        correct += (pred == data['labels']).sum().item()
        # Fix: count the actual batch size rather than the hard-coded 8 so
        # the accuracy stays correct if batch_size/drop_last ever change.
        total += data['labels'].size(0)

        # Optional visual spot-check of the first 4 images of every batch
        # (the original gated this on `i % 1 == 0`, which is always true).
        image = data['pixel_values'][:4]
        pred_names = [names[j] for j in pred]
        label_names = [names[j] for j in data['labels']]
        # show(image, pred_names, label_names)

        # Only evaluate 5 batches to keep the check fast.
        if i == 4:
            break

    print(correct / total)


# Baseline accuracy before fine-tuning.
test()


def train():
    """Fine-tune the global `model` for one epoch and save it to disk."""
    # NOTE(review): `transformers.AdamW` is deprecated (removed in recent
    # transformers releases). `torch.optim.AdamW` is the replacement, but
    # its defaults differ slightly, so the import is left unchanged here.
    optimizer = AdamW(model.parameters(), lr=2e-4)
    # Linear decay from the initial lr down to 0 over one epoch, no warmup.
    scheduler = get_scheduler(name='linear',
                              num_warmup_steps=0,
                              num_training_steps=len(train_dataloader),
                              optimizer=optimizer)

    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    model.train()
    model.to(device)
    for i, data in enumerate(train_dataloader):
        # Move the whole batch to the training device.
        for k in data.keys():
            data[k] = data[k].to(device)

        out = model(**data)
        loss = out['loss']

        loss.backward()
        # Clip gradient norm to stabilize fine-tuning.
        torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)

        optimizer.step()
        scheduler.step()

        # A single zero_grad on the optimizer suffices; the original's extra
        # model.zero_grad() cleared the same gradients a second time.
        optimizer.zero_grad()

        if i % 10 == 0:
            preds = out['logits'].argmax(dim=1)
            # Fix: divide by the actual batch size (8) — the hard-coded 16
            # under-reported accuracy by half.
            accuracy = (data['labels'] == preds).sum().item() / data['labels'].size(0)
            lr = optimizer.param_groups[0]['lr']
            print(i, loss.item(), accuracy, lr)

    model.to('cpu')
    # Saves the whole module via pickle; loading requires this class definition.
    torch.save(model, '../model/my_model.pkl')


train()

# NOTE(review): torch.load unpickles arbitrary objects — only load trusted
# checkpoints (consider saving/loading a state_dict with weights_only=True).
model = torch.load('../model/my_model.pkl')
# Accuracy after fine-tuning.
test()
