import timm
import torch.nn as nn
from torch.nn import Sequential, Linear
import torch.nn.functional as F
import torch
from torchvision import transforms, datasets
import tqdm
import os
from torch.utils.data import DataLoader
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import sys

class ComplementEntropy(nn.Module):
    """Complement entropy loss (Chen et al., "Complement Objective Training").

    Pushes the probability mass assigned to the non-ground-truth
    (complement) classes toward a uniform distribution by maximizing the
    entropy of the normalized complement distribution. Returned as a
    (negative) scalar so that minimizing it maximizes that entropy.
    """

    def __init__(self):
        super(ComplementEntropy, self).__init__()

    # Implemented step by step to match the formula described in the paper.
    def forward(self, yHat, y):
        """Compute the normalized complement entropy.

        Args:
            yHat: raw logits of shape (batch, num_classes).
            y: integer class labels of shape (batch,).

        Returns:
            Scalar loss averaged over both batch size and class count.
        """
        batch_size = len(y)
        # Infer the class count from the logits instead of hard-coding 2,
        # so the loss generalizes to any number of classes.
        num_classes = yHat.size(1)
        yHat = F.softmax(yHat, dim=1)
        # Probability of the ground-truth class, shape (batch, 1).
        Yg = torch.gather(yHat, 1, torch.unsqueeze(y, 1))
        Yg_ = (1 - Yg) + 1e-7  # avoiding numerical issues (first)
        # Normalized complement distribution; Yg_ is (batch, 1) and
        # broadcasts over the class dimension.
        Px = yHat / Yg_
        Px_log = torch.log(Px + 1e-10)  # avoiding numerical issues (second)
        # Mask that zeroes the ground-truth entry. Built directly on the
        # input's device instead of forcing .cuda(), so CPU runs also work.
        y_zerohot = torch.ones(batch_size, num_classes, device=yHat.device)
        y_zerohot.scatter_(1, y.view(batch_size, 1), 0)
        output = Px * Px_log * y_zerohot
        loss = torch.sum(output)
        loss /= float(batch_size)
        loss /= float(num_classes)
        return loss

def train(model, device, train_loader, optimizer, criterion):
    """Run one training epoch.

    Args:
        model: network to optimize (set to train mode here).
        device: torch.device the batches are moved to.
        train_loader: DataLoader yielding (images, labels) batches.
        optimizer: optimizer stepping on the model's trainable parameters.
        criterion: loss taking (logits, labels).

    Returns:
        Tuple of (mean loss per batch, epoch accuracy).
    """
    model.train()
    running_loss = 0.0
    train_steps = len(train_loader)
    train_num = len(train_loader.dataset)
    correct = 0
    train_bar = tqdm.tqdm(train_loader, file=sys.stdout)

    for step, (images, labels) in enumerate(train_bar, start=1):
        images, labels = images.to(device), labels.to(device)
        optimizer.zero_grad()
        output = model(images)
        loss = criterion(output, labels)
        loss.backward()
        optimizer.step()

        pred = torch.argmax(output, dim=1)
        correct += pred.eq(labels.view_as(pred)).sum().item()
        running_loss += loss.item()

        # Show the running mean loss over the batches seen so far. The
        # previous version divided by (train_steps * batch_size), which
        # understated the loss by ~batch_size and never tracked progress.
        train_bar.desc = "train loss: {:.3f}".format(running_loss / step)

    train_acc = correct / train_num

    return running_loss / train_steps, train_acc

def validate(model, device, val_loader, criterion):
    """Evaluate the model on the validation set.

    Args:
        model: network to evaluate (switched to eval mode here).
        device: torch.device the batches are moved to.
        val_loader: DataLoader yielding (images, labels) batches.
        criterion: loss taking (logits, labels).

    Returns:
        Tuple of (mean loss per batch, validation accuracy).
    """
    model.eval()
    n_batches = len(val_loader)
    n_samples = len(val_loader.dataset)
    n_correct = 0
    total_loss = 0.0
    with torch.no_grad():
        progress = tqdm.tqdm(val_loader, file=sys.stdout)
        for images, labels in progress:
            images = images.to(device)
            labels = labels.to(device)
            outputs = model(images)
            total_loss += criterion(outputs, labels).item()
            pred = outputs.argmax(dim=1, keepdim=True)
            n_correct += pred.eq(labels.view_as(pred)).sum().item()

    return total_loss / n_batches, n_correct / n_samples

if __name__ == '__main__':

    # Output / input locations and training hyper-parameters.
    save_path = r'../../models/classic_deit_base/'
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    saveCSVPath = os.path.join(save_path, 'valAcc.csv')
    data_dir = r'../../combinations/11'
    batch_size = 256
    num_workers = 8
    epochs = 200
    lr = 1e-3

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print("using {} device.".format(device))

    # NOTE(review): ToTensor runs before Resize, so Resize operates on the
    # tensor rather than the PIL image. Kept as-is to preserve the exact
    # pixel values of prior runs — confirm before reordering.
    data_transforms = transforms.Compose([
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Resize((224, 224)),
        transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
    ])

    assert os.path.exists(data_dir), "{} path does not exist.".format(data_dir)

    train_dataset = datasets.ImageFolder(os.path.join(data_dir, 'train'), data_transforms)
    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers)

    val_dataset = datasets.ImageFolder(os.path.join(data_dir, 'val'), data_transforms)
    val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers)

    # Pretrained distilled DeiT, re-headed for binary classification.
    model = timm.create_model('vit_deit_base_distilled_patch16_224', pretrained=True, num_classes = 2)
    # Freeze the backbone; only the new classification head is trained.
    for param in model.parameters():
        param.requires_grad = False

    model.head = Sequential(Linear(model.head.in_features, 2))
    # Fix: iterate parameters, not sub-modules. Iterating the Sequential
    # yields Module objects, and setting .requires_grad on a Module is a
    # plain attribute assignment that never reaches its weights.
    for param in model.head.parameters():
        param.requires_grad = True

    # Fix: honor the device selected above instead of unconditionally
    # calling .cuda(), which crashes on CPU-only machines.
    model.to(device)
    criterion = nn.CrossEntropyLoss()
    # criterion = ComplementEntropy()
    # Only the trainable (head) parameters need optimizing; filtering also
    # keeps weight decay away from the frozen backbone.
    optimizer = torch.optim.Adam(
        filter(lambda p: p.requires_grad, model.parameters()),
        lr=lr, weight_decay=1e-4)

    best_acc = 0.0

    train_losses = []
    train_acces = []
    val_losses = []
    val_acces = []

    for epoch in range(epochs):
        train_loss, train_acc = train(model, device, train_loader, optimizer, criterion)
        val_loss, val_acc = validate(model, device, val_loader, criterion)
        print("epoch [{}/{}], train loss: {:.4f}, train accuracy: {:.4f}, val loss {:.4f}, val accuracy: {:.4f}"
              .format(epoch+1, epochs, train_loss, train_acc, val_loss, val_acc))
        train_losses.append(train_loss)
        train_acces.append(train_acc)
        val_losses.append(val_loss)
        val_acces.append(val_acc)
        if val_acc > best_acc:
            best_acc = val_acc
            # Fix: record the epoch that produced this checkpoint; the old
            # name used the constant `epochs` (always 200).
            ckpt_name = 'VIT_epoch_' + str(epoch + 1) + '_valacc' + str(val_acc) + '.pth'
            torch.save(model.state_dict(), os.path.join(save_path, ckpt_name))

    # Persist the per-epoch validation accuracy for later analysis.
    vl = pd.DataFrame(val_acces)
    vl.to_csv(saveCSVPath)

    # Plot the training curves.
    plt.plot(np.arange(len(train_losses)), train_losses, label="train loss")
    plt.plot(np.arange(len(train_acces)), train_acces, label="train acc")
    plt.plot(np.arange(len(val_losses)), val_losses, label="valid loss")
    plt.plot(np.arange(len(val_acces)), val_acces, label="valid acc")
    plt.legend()  # show the legend
    plt.xlabel('epoches')
    plt.title('Model accuracy&loss')
    plt.savefig(os.path.join(save_path, "VIT.png"))



