import os
import math
import torch
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
from torch.utils.tensorboard import SummaryWriter
from torchvision import transforms
from dataset import MyDataset
from config import parsers
from torch_Beit_model import BEiTImageEncoder
from utils import read_split_data
import xlwt
import json
from torch.nn import CrossEntropyLoss


def train_one_epoch(model, optimizer, loss_fn, data_loader, device, epoch, epochs):
    """Run one training epoch and return (mean_batch_loss, accuracy).

    Args:
        model: network whose forward returns ``(logits, extra)``; only logits are used.
        optimizer: optimizer updating the model's trainable parameters.
        loss_fn: classification criterion, e.g. ``CrossEntropyLoss``.
        data_loader: iterable of batches; item 0 is the input tensor, item -1 the labels.
        device: torch device the batches are moved to.
        epoch: current epoch index (0-based), used for logging only.
        epochs: total number of epochs, used for logging only.

    Returns:
        Tuple of (average loss over batches, fraction of correct predictions), both floats.
    """
    model.train()
    train_loss_sum, count = 0.0, 0
    train_acc_sum, n = 0.0, 0
    for batch_con in data_loader:
        batch_con = tuple(p.to(device) for p in batch_con)
        optimizer.zero_grad()
        pred, _ = model(batch_con[0])
        loss = loss_fn(pred, batch_con[-1])
        loss.backward()
        optimizer.step()
        # BUG FIX: the original accumulated the raw loss tensor, which keeps every
        # batch's autograd graph alive for the whole epoch (memory leak). .item()
        # extracts a detached Python float.
        train_loss_sum += loss.item()
        count += 1
        # argmax over the class dimension gives the predicted label per sample.
        predicted = torch.max(pred.data, 1)[1]
        train_acc_sum += (predicted == batch_con[-1]).sum().item()
        n += batch_con[-1].shape[0]
    msg = "[{0}/{1}]\tTrain_Loss:{2:.4f}\tTrain_acc:{3:.4f}"
    print(msg.format(epoch + 1, epochs, train_loss_sum / count, train_acc_sum / n))
    return train_loss_sum / count, train_acc_sum / n


def evaluate(model, val_loader, loss_fn, device, epoch, epochs):
    """Evaluate the model on the validation set; returns (mean_batch_loss, accuracy).

    Args:
        model: network whose forward returns ``(logits, state)``.
        val_loader: iterable of batches; item 0 is the input tensor, item -1 the labels.
        loss_fn: classification criterion, e.g. ``CrossEntropyLoss``.
        device: torch device the batches are moved to.
        epoch: current epoch index (0-based), used for logging only.
        epochs: total number of epochs, used for logging only.

    Returns:
        Tuple of (average loss over batches, fraction of correct predictions), both floats.
    """
    model.eval()
    correct, total = 0, 0
    loss_sum, count = 0.0, 0
    with torch.no_grad():
        hidden_state = None
        for batch_con in val_loader:
            batch_con = tuple(p.to(device) for p in batch_con)
            # NOTE(review): validation threads a recurrent state through the model,
            # while train_one_epoch calls model(x) with no state — confirm the
            # model's forward supports both call shapes.
            pred, hidden_state = model(batch_con[0], hidden_state)
            loss = loss_fn(pred, batch_con[-1])
            predicted = torch.max(pred.data, 1)[1]
            correct += (predicted == batch_con[-1]).sum().item()
            total += predicted.shape[0]
            # BUG FIX: accumulate a detached float instead of the loss tensor so
            # per-batch tensors are not kept alive across the loop.
            loss_sum += loss.item()
            count += 1
    msg = "[{0}/{1}]\tVal_Loss:{2:.4f}\tVal_acc:{3:.4f}"
    print(msg.format(epoch + 1, epochs, loss_sum / count, correct / total))
    return loss_sum / count, correct / total


def main():
    """Train a BEiT image classifier: build loaders, optionally load mapped
    pretrained weights, train/validate per epoch, and log to TensorBoard + a
    spreadsheet. Saves the best-accuracy checkpoint to ``args.best_model``."""
    args = parsers()
    # Create the output directory if missing (idempotent).
    os.makedirs("./beit_model", exist_ok = True)
    # Training-log writer.
    tb_writer = SummaryWriter()

    train_images_path, train_images_label, val_images_path, val_images_label = read_split_data(args.data_path)

    data_transform = {
        "train": transforms.Compose([transforms.Resize(256),
                                     transforms.CenterCrop(224),
                                     # transforms.RandomResizedCrop(224),
                                     # transforms.RandomHorizontalFlip(),
                                     transforms.ToTensor(),
                                     transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]),
        "val": transforms.Compose([transforms.Resize(256),
                                   transforms.CenterCrop(224),
                                   transforms.ToTensor(),
                                   transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])}

    # Instantiate the training dataset.
    train_dataset = MyDataset(images_path = train_images_path,
                              images_class = train_images_label,
                              transform = data_transform["train"])

    # Instantiate the validation dataset.
    val_dataset = MyDataset(images_path = val_images_path,
                            images_class = val_images_label,
                            transform = data_transform["val"])

    batch_size = args.batch_size
    # nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8])  # number of workers
    # print('Using {} dataloader workers every process'.format(nw))
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size = batch_size,
                                               shuffle = True,
                                               pin_memory = False,
                                               num_workers = 0,
                                               collate_fn = train_dataset.collate_fn)

    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size = batch_size,
                                             shuffle = True,
                                             pin_memory = False,
                                             num_workers = 0,
                                             collate_fn = val_dataset.collate_fn)

    model = BEiTImageEncoder()
    device = torch.device(args.device if torch.cuda.is_available() else "cpu")
    model.to(device)
    # Dummy input for graph tracing; must match the real input size.
    images = torch.zeros(1, 3, 224, 224).to(device)
    tb_writer.add_graph(model, images, verbose = False)

    if args.weights != "":
        # raise (not assert): assert is stripped under `python -O`.
        if not os.path.exists(args.weights):
            raise FileNotFoundError("weights file: '{}' not exist.".format(args.weights))
        # Parameter-name mapping: local model name -> pretrained checkpoint name.
        with open('./param_name_map.json', 'r') as f:
            map_dict = json.load(f)
        # pretrained_params = torch.load('./beit/beit_base_patch16_224_pt22k.pth', map_location = torch.device('cpu'))
        pretrained_params = torch.load(args.weights, map_location = device)
        # Invert once (pretrained name -> local name) instead of scanning
        # map_dict for every checkpoint entry.
        inv_map = {v: k for k, v in map_dict.items()}
        own_state = model.state_dict()
        with torch.no_grad():
            for _, param_group in pretrained_params.items():
                for name, par in param_group.items():
                    local_name = inv_map.get(name)
                    if local_name is not None and local_name in own_state:
                        # BUG FIX: the original did `model.state_dict()[k] = par`,
                        # which only rebinds a key in a temporary dict returned by
                        # state_dict() — the model's weights were never updated.
                        # copy_ writes into the live parameter tensor in place.
                        own_state[local_name].copy_(par)

    if args.freeze_layers:
        # Only these parameters stay trainable; everything else is frozen.
        freeze_para = ["cls_token", "patch_embed.proj.weight", "patch_embed.proj.bias"]
        for name, para in model.named_parameters():
            if name not in freeze_para:
                para.requires_grad_(False)
            else:
                print("training {}".format(name))

    pg = [p for p in model.parameters() if p.requires_grad]

    optimizer = optim.SGD(pg, lr = args.lr, momentum = 0.9, weight_decay = args.weight_decay)
    # Cosine decay from lr down to lr * lrf over args.epochs.
    lf = lambda x: ((1 + math.cos(x * math.pi / args.epochs)) / 2) * (1 - args.lrf) + args.lrf
    # NOTE(review): scheduler.step() below is commented out, so this schedule is
    # currently inert and the lr stays constant — confirm whether that is intended.
    scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda = lf)
    loss_fn = CrossEntropyLoss()
    best_acc = 0

    # Per-epoch metrics spreadsheet.
    wb = xlwt.Workbook()
    sheet1 = wb.add_sheet("first sheet")
    head_data = ["epoch", "train_loss", "train_acc", "val_loss", "val_acc", "lr", "best"]
    for col, head in enumerate(head_data):
        sheet1.write(0, col, head)

    for epoch in range(args.epochs):

        sheet1.write(epoch + 1, 0, epoch + 1)
        sheet1.write(epoch + 1, 5, str(optimizer.state_dict()['param_groups'][0]['lr']))

        # train
        train_loss, train_acc = train_one_epoch(model = model,
                                                optimizer = optimizer,
                                                data_loader = train_loader,
                                                loss_fn = loss_fn,
                                                device = device,
                                                epoch = epoch,
                                                epochs = args.epochs)
        # scheduler.step()

        sheet1.write(epoch + 1, 1, str(train_loss))
        sheet1.write(epoch + 1, 2, str(train_acc))

        # validate
        val_loss, val_acc = evaluate(model = model,
                                     val_loader = val_loader,
                                     loss_fn = loss_fn,
                                     device = device,
                                     epoch = epoch,
                                     epochs = args.epochs)

        sheet1.write(epoch + 1, 3, str(val_loss))
        sheet1.write(epoch + 1, 4, str(val_acc))

        tags = ["train_loss", "train_acc", "val_loss", "val_acc", "learning_rate"]
        tb_writer.add_scalar(tags[0], train_loss, epoch)
        tb_writer.add_scalar(tags[1], train_acc, epoch)
        tb_writer.add_scalar(tags[2], val_loss, epoch)
        tb_writer.add_scalar(tags[3], val_acc, epoch)
        tb_writer.add_scalar(tags[4], optimizer.param_groups[0]["lr"], epoch)

        # Checkpoint whenever validation accuracy improves.
        if val_acc > best_acc:
            best_acc = val_acc
            torch.save(model.state_dict(), args.best_model)

    sheet1.write(1, 6, str(best_acc))
    # BUG FIX: xlwt writes the legacy binary .xls format; saving it with an
    # .xlsx extension produces a file Excel rejects as corrupted.
    wb.save('./Train_data.xls')
    print("The Best Acc = : {:.4f}".format(best_acc))
    tb_writer.close()


# Run training only when executed as a script (not on import).
if __name__ == '__main__':
    main()
