import math
import os
import random

import numpy as np
import torch.cuda
import tqdm
import yaml
from torch import optim
from torch.utils.data import random_split, DataLoader
from torch.utils.tensorboard import SummaryWriter
import torch.nn.functional as F
from sklearn.metrics import f1_score, confusion_matrix

from utils.res_save import drawAndSave, save_as_json
from dataset.dataset import PS_Dataset
from models.model import Classifier
from dataset.dataset import statistic_class


def set_seed(seed):
    """Seed every RNG in play (python, numpy, torch, CUDA) and force
    deterministic cuDNN behavior so runs are reproducible."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)  # covers the multi-GPU case
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

def train(config, model, dataloader, optimizer, epoch, device):
    """Run one training epoch and return averaged metrics.

    Args:
        config: dict providing at least "loss_L2_weight" (L2 penalty weight).
        model: classifier called as model(image, text) -> logits of shape (N, C).
        dataloader: yields (target, text, image) batches; target is (N, 1).
        optimizer: torch optimizer over model.parameters().
        epoch: current epoch index (used only for the progress-bar label).
        device: device the model and targets live on.

    Returns:
        (loss_average, acc_average, cross_entropy_average) as 0-dim tensors,
        so existing callers can still call .item() on them.
    """
    record_loss = []
    record_acc = []
    record_cross_entropy = []
    model.train()

    for (target, text, image) in tqdm.tqdm(dataloader, desc="Training epoch[%d]" % epoch):
        # NOTE(review): image/text are forwarded as-is; assumes the model (or
        # its processors) moves them onto `device` — confirm against Classifier.
        prediction = model(image, text)
        target = target.to(device)

        # Cross-entropy loss; target arrives as (N, 1) so squeeze to (N,).
        loss_cross_entropy = F.cross_entropy(prediction, target.squeeze(dim=1))
        # L2 regularization over all parameters. The accumulator lives on
        # `device` and needs no requires_grad of its own: gradients flow
        # through torch.norm(param) itself.
        loss_L2_reg = torch.zeros((), device=device)
        for param in model.parameters():
            loss_L2_reg = loss_L2_reg + torch.norm(param, 2)
        loss = loss_cross_entropy + config["loss_L2_weight"] * loss_L2_reg / 2

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        acc = (target.squeeze(dim=1) == torch.argmax(prediction, dim=1)).sum() / target.size(0)

        # Detach before recording: appending the live loss tensors would keep
        # every batch's autograd graph alive for the whole epoch (memory leak).
        record_loss.append(loss.detach())
        record_acc.append(acc.detach())
        record_cross_entropy.append(loss_cross_entropy.detach())

    loss_average = sum(record_loss) / len(record_loss)
    acc_average = sum(record_acc) / len(record_acc)
    cross_entropy_average = sum(record_cross_entropy) / len(record_cross_entropy)

    return loss_average, acc_average, cross_entropy_average


@torch.no_grad()
def evaluate(model, dataloader, epoch, device):
    """Compute mean cross-entropy loss and mean batch accuracy on a split.

    Args:
        model: classifier called as model(image, text) -> logits of shape (N, C).
        dataloader: yields (target, text, image) batches; target is (N, 1).
        epoch: current epoch index (used only for the progress-bar label).
        device: device the targets (and model) live on.

    Returns:
        (loss_average, acc_average) as 0-dim tensors.
    """
    model.eval()

    losses, accuracies = [], []
    for target, text, image in tqdm.tqdm(dataloader, desc="Evaluating epoch[%d]" % epoch):
        target = target.to(device)
        logits = model(image, text)
        labels = target.squeeze(dim=1)

        losses.append(F.cross_entropy(logits, labels))
        accuracies.append((labels == torch.argmax(logits, dim=1)).sum() / target.size(0))

    return sum(losses) / len(losses), sum(accuracies) / len(accuracies)


@torch.no_grad()
def test(model, dataloader):
    """Run the model over the test split and score it.

    Args:
        model: classifier called as model(image, text) -> logits of shape (N, C).
        dataloader: yields (target, text, image) batches; target is (N, 1),
            on CPU (it is converted to numpy without a .cpu() call).

    Returns:
        (f1, confusion): per-class F1 scores (average=None) and the
        confusion matrix, both from sklearn.
    """
    model.eval()

    predicted_labels = []
    true_labels = []
    for target, text, image in tqdm.tqdm(dataloader, desc="Testing"):
        logits = model(image, text)
        predicted_labels.extend(logits.max(1)[1].cpu().numpy().tolist())
        true_labels.extend(target.squeeze(dim=1).numpy().tolist())

    f1 = f1_score(true_labels, predicted_labels, average=None)
    confusion = confusion_matrix(true_labels, predicted_labels)
    return f1, confusion


def main(config):
    """End-to-end pipeline: seed, build model and data, train/evaluate per
    epoch while checkpointing, then test and persist metrics and plots.

    Args:
        config: dict loaded from the YAML config file; keys used include
            "log_dir", "json_file_path", "data_path", "train_dataset_rate",
            "batch_size", "optimizer" (lr/beta1/beta2), "total_epoch",
            "res_model_path", "res_picture_save_path", "loss_L2_weight".
    """
    # set seed for reproducibility
    seed = 50
    set_seed(seed)

    # per-epoch metric records
    train_loss = []
    train_acc = []
    eval_loss = []
    eval_acc = []
    cross_entropy = []

    # tensorboard
    writer = SummaryWriter(log_dir=config["log_dir"])

    # device
    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    # model
    model = Classifier(config).float()
    model.to(device)

    # dataset
    dataset = PS_Dataset(config["json_file_path"], config["data_path"], model.img_processor, config)

    # split dataset: train gets train_dataset_rate; the remainder is halved
    # between evaluation and test (floor/ceil so the sizes sum exactly)
    train_size = int(config['train_dataset_rate'] * len(dataset))
    evaluate_test_size = (len(dataset) - train_size) / 2
    train_dataset, evaluate_dataset, test_dataset = random_split(
        dataset,
        [train_size, math.floor(evaluate_test_size), math.ceil(evaluate_test_size)])

    print("train_size: %d    eval and test size: %d" % (train_size, evaluate_test_size))
    print("dataset class distribution:")
    print(f"train dataset    {statistic_class(train_dataset)}")
    print(f"evaluate dataset {statistic_class(evaluate_dataset)}")
    print(f"test dataset     {statistic_class(test_dataset)}")

    def _make_loader(split):
        # All three splits share identical loader settings; workers are
        # re-seeded deterministically from the global seed.
        return DataLoader(
            dataset=split,
            batch_size=config["batch_size"],
            shuffle=True,
            pin_memory=True,
            worker_init_fn=lambda worker_id: np.random.seed(seed + worker_id)
        )

    train_dataloader = _make_loader(train_dataset)
    evaluate_dataloader = _make_loader(evaluate_dataset)
    test_dataloader = _make_loader(test_dataset)

    # optimizer
    optimizer = optim.Adam(
        params=model.parameters(),
        lr=config['optimizer']['lr'],
        betas=(config['optimizer']['beta1'], config['optimizer']['beta2'])
    )

    for epoch in range(0, config['total_epoch']):
        loss, acc, loss_cross_entropy = train(config, model, train_dataloader, optimizer, epoch, device)

        # Checkpoint when no checkpoint exists yet, or when (past the halfway
        # point) training loss improved over the previous epoch. The original
        # tested os.path.exists('model.pth'), a name that is never written
        # (files are saved as model_{epoch}.pth), so it re-saved every epoch;
        # it also deleted any file merely containing "pt" in its name.
        checkpoints = [f for f in os.listdir(config['res_model_path']) if f.endswith('.pth')]
        improved = (epoch > config['total_epoch'] / 2
                    and train_loss and train_loss[-1] > loss)
        if improved or not checkpoints:
            # keep a single checkpoint at a time
            for file in checkpoints:
                os.remove(os.path.join(config['res_model_path'], file))
            torch.save(model, os.path.join(config['res_model_path'], f'model_{epoch}.pth'))

        train_loss.append(loss.item())
        train_acc.append(acc.item())
        cross_entropy.append(loss_cross_entropy.item())

        writer.add_scalar('train/loss', loss, epoch)
        writer.add_scalar('train/acc', acc, epoch)
        writer.add_scalar('train/loss_cross_entropy', loss_cross_entropy, epoch)

        # evaluation
        loss, acc = evaluate(model, evaluate_dataloader, epoch, device)

        eval_loss.append(loss.item())
        eval_acc.append(acc.item())

        writer.add_scalar('evaluate/loss', loss, epoch)
        writer.add_scalar('evaluate/acc', acc, epoch)

    f1, confusion = test(model, test_dataloader)

    save_as_json(config, 'train_loss', train_loss)
    save_as_json(config, 'train_acc', train_acc)
    save_as_json(config, 'evaluate_loss', eval_loss)
    save_as_json(config, 'evaluate_acc', eval_acc)
    save_as_json(config, 'F1', f1)
    save_as_json(config, 'confusion_matrix', confusion)
    save_as_json(config, 'loss_cross_entropy', cross_entropy)

    # save result plots (duplicate evaluate_acc plot call removed)
    drawAndSave(train_loss, "train loss", "epoch", "loss", os.path.join(config['res_picture_save_path'], "train_loss.png"))
    drawAndSave(train_acc, "train acc", "epoch", "acc", os.path.join(config['res_picture_save_path'], "train_acc.png"))

    drawAndSave(eval_loss, "evaluate loss", "epoch", "loss", os.path.join(config['res_picture_save_path'], "evaluate_loss.png"))
    drawAndSave(eval_acc, "evaluate acc", "epoch", "accurate", os.path.join(config['res_picture_save_path'], "evaluate_acc.png"))


if __name__ == '__main__':
    config_file_path = r'D:\Project\multimoding\project\CLIP\classify\config.yaml'
    # Use a context manager so the config file handle is closed promptly
    # (the original open() was never closed).
    # NOTE(review): yaml.Loader can construct arbitrary Python objects; switch
    # to yaml.SafeLoader if this config file is ever not fully trusted.
    with open(config_file_path, 'r', encoding='utf-8') as config_file:
        config = yaml.load(config_file, Loader=yaml.Loader)
    main(config)
