import torch
from torch import nn, optim
import torch.nn.functional as F
import time
from datetime import timedelta, datetime
import numpy as np
from sklearn import metrics
import math

from config import Config
from read_data import DatasetIter
from Adversiral import AddGaussianNoise, FGM, PGD
from Model import BertClassifier
from torch.utils.data import DataLoader
from Regularization import Regularization
from Loss import SCE, SCE_FGM
from pytorch_bert.optimization import BertAdam


# Pin all training/eval to GPU index 1 when CUDA is available (hard-coded
# device choice; fall back to CPU otherwise).
DEVICE = torch.device("cuda:1" if torch.cuda.is_available() else "cpu")


def get_time_dif(start_time):
    """Return the wall-clock time elapsed since *start_time* as a timedelta.

    The result is rounded to whole seconds for compact logging.
    """
    elapsed = time.time() - start_time
    return timedelta(seconds=int(round(elapsed)))


def acc_eval(preds, y):
    """Return the fraction of positions where *preds* equals *y* as a float."""
    matches = torch.eq(preds, y).float()
    return (matches.sum() / len(matches)).item()


def rampup(epoch, end):
    """Gaussian ramp-up weight for the consistency loss.

    Returns exp(-5 * (1 - T)^2), where T grows linearly from 0 to 1 over
    the first *end* epochs and is clamped at 1 afterwards.
    """
    progress = min(float(epoch) / float(end), 1.0)
    return math.exp(-5 * (1 - progress) ** 2)


def logfile(msg, file='./result.txt'):
    """Print *msg* to stdout and append it, newline-terminated, to *file*."""
    print(msg)
    with open(file, 'a', encoding='UTF-8') as handle:
        handle.write(msg + '\n')


def bert_evaluate(model, config, criterion, data):
    """Evaluate *model* on *data* without gradient tracking.

    Args:
        model: a BertClassifier; moved to DEVICE and switched to eval mode.
        config: run configuration; only ``config.batch_size`` is read here.
        criterion: loss callable applied as ``criterion(logits, labels)``.
        data: raw samples in whatever format DatasetIter accepts.

    Returns:
        Tuple ``(y_data, y_pred, avg_acc, avg_loss)``: true labels, argmax
        predictions, and the means of per-batch accuracy and loss.

    Note: avg_acc/avg_loss are means of per-batch means, so a smaller final
    batch carries the same weight as a full batch.
    """
    model.to(DEVICE)
    model.eval()

    acc_list = []
    loss_list = []
    y_pred = []
    y_data = []

    valset = DatasetIter(data=data, device=DEVICE)
    val_loader = DataLoader(dataset=valset, batch_size=config.batch_size, num_workers=0, shuffle=False)

    with torch.no_grad():
        # Fix: the original loop reused the name `data`, shadowing the
        # function's `data` argument.
        for batch in val_loader:
            logits = model(batch)
            # Compute the argmax once per batch instead of twice.
            preds = torch.max(logits, dim=1)[1]

            y_pred.extend(preds.cpu().numpy())
            y_data.extend(batch[1].cpu().numpy())

            acc_list.append(acc_eval(preds, batch[1]))
            loss_list.append(criterion(logits, batch[1]).item())

    avg_acc = np.mean(acc_list)
    avg_loss = np.mean(loss_list)

    return y_data, y_pred, avg_acc, avg_loss


def bert_test(config):
    """Load the best saved checkpoint and report test loss/accuracy.

    Reads the test split from dbpedia/test-32.npy, appends the formatted
    result to ./result3.txt, prints it, and returns the mean test accuracy.
    """
    print('testing.......')
    criterion = nn.CrossEntropyLoss()
    classifier = BertClassifier(config)
    classifier.load_state_dict(torch.load(config.model_name))
    test_data = np.load(file='dbpedia/test-32.npy', allow_pickle=True)
    y_data, y_pred, test_acc_mean, test_loss_mean = bert_evaluate(classifier, config, criterion, test_data)

    msg = 'Test Loss: {0:>6.2}, Test Acc: {1:>7.2%}\n'
    report = msg.format(test_loss_mean, test_acc_mean)
    with open('./result3.txt', 'a', encoding='UTF-8') as f:
        f.write(report)
        f.write('========================================\n')

    print(report)
    return test_acc_mean


def bert_pmgd(config):
    """Semi-supervised BERT training with PGD-based consistency (PMGD).

    Per batch, three gradient contributions are accumulated before a single
    Adam step:
      1. a consistency loss between clean logits and logits from a
         Gaussian-noise forward pass,
      2. the largest consistency loss found across the PGD attack steps
         (adversarial perturbation of the word embeddings),
      3. supervised CE/SCE loss plus L2 regularization on the labeled
         subset of the batch.
    Unlabeled samples are marked by setting their label to
    config.num_classes. The best checkpoint by validation accuracy is saved
    to config.model_name.

    Returns:
        float: best validation accuracy observed during training.
    """
    # Supervised loss: symmetric cross-entropy when enabled, else plain CE.
    if config.sce_flag:
        ce_loss = SCE(config)
    else:
        ce_loss = nn.CrossEntropyLoss()

    # Consistency loss between two forward passes of the same batch.
    if config.sce_fgm_flag:
        l2_loss = SCE_FGM(config)
    else:
        l2_loss = nn.MSELoss()
    kl_loss = nn.KLDivLoss()  # unused in the active path; kept for the commented KL variant

    model = BertClassifier(config)
    # model.load_state_dict(torch.load('./model.pt'))
    model.to(DEVICE)

    noise = AddGaussianNoise()

    logfile(
        f'pmgd 混合 一路noise一路无noise ce/sce/mse num_epochs:{config.num_epochs}, 带标签:{config.label_num}, 无标签:{config.unlabel_num}')

    start_time = time.time()
    time_dif = get_time_dif(start_time)
    logfile(f"Time usage:, {time_dif}")

    best_val_acc = 0.0
    val_loss_list = []
    val_acc_list = []

    train = np.load(file='dbpedia/train-32.npy', allow_pickle=True)
    val = np.load(file='dbpedia/val-32.npy', allow_pickle=True)

    # Deterministic shuffle so the labeled/unlabeled split is reproducible
    # across runs.
    rng = np.random.RandomState(246)  # 135 246
    indices = np.arange(len(train))
    rng.shuffle(indices)
    train = train[indices]

    # Inject symmetric label noise into an err_rate fraction of the labeled
    # samples (fixed seed). The wrap-around keeps the corrupted label in
    # [0, num_classes) so it never collides with the unlabeled marker.
    if config.err_rate > 0.0:
        err = np.random.RandomState(456)  # 456
        err_idx = err.choice(config.label_num,
                             size=int(config.label_num * config.err_rate),
                             replace=False)
        for i in err_idx:
            train[i][1] += err.randint(low=1, high=config.num_classes)
            if train[i][1] >= config.num_classes:
                train[i][1] -= config.num_classes

    # Mark the last unlabel_num samples as unlabeled (label == num_classes),
    # then build the final training set: label_num labeled + unlabel_num
    # unlabeled, reshuffled together.
    for item in train[-config.unlabel_num:]:
        item[1] = config.num_classes
    train = np.append(train[:config.label_num], train[-config.unlabel_num:], axis=0)
    rng.shuffle(train)


    # param_optimizer = list(model.named_parameters())
    # no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    # optimizer_grouped_parameters = [
    #     {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
    #     {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}]
    # optimizer = BertAdam(optimizer_grouped_parameters,
    #                      lr=1e-5,
    #                      warmup=0.05,
    #                      t_total=len(train) * config.num_epochs)
    optimizer = optim.Adam(model.parameters(), lr=1e-5)

    for epoch in range(config.num_epochs):
        logfile(f'Epoch:{epoch}')
        train_loss_list = []
        train_acc_list = []
        pgd = PGD(model)  # NOTE: emb_name below must match the model's 'word_embeddings' parameter

        trainset = DatasetIter(data=train, device=DEVICE)
        train_loader = DataLoader(dataset=trainset, batch_size=config.batch_size, num_workers=0, shuffle=False)

        for batch_index, data in enumerate(train_loader):
            model.train()

            # Indices of labeled samples in this batch (unlabeled ones carry
            # the sentinel label config.num_classes).
            label_idx = (data[1] != config.num_classes).nonzero()
            label_idx = label_idx.view(label_idx.shape[0])

            y_logits = model.forward(data)
            y_noise = model.forward(data, noise)

            # l2 = rampup(epoch, end=config.num_epochs) * kl_loss(input=F.log_softmax(y_noise, dim=-1), target=F.softmax(y_logits, dim=-1))
            # l2.backward(retain_graph=True)
            # NOTE(review): rampup is called with a constant epoch of 10, so
            # the consistency weight is fixed for the whole run — confirm this
            # is intended (the commented variants above/below use `epoch`).
            l2 = rampup(10, end=config.num_epochs) * l2_loss(input=y_noise, target=y_logits)
            l2.backward(retain_graph=True)

            # pgd.backup_grad()
            # for t in range(pgd.attack_num):
            #     pgd.attack(is_first_attack=(t == 0), emb_name='word_embeddings')
            #     if t != pgd.attack_num-1:
            #         model.zero_grad()
            #     else:
            #         pgd.restore_grad()
            #     y_pgd = model.forward(data)
            #     l2_pgd = rampup(epoch, end=config.rampup_end) * l2_loss(input=y_pgd, target=y_logits)
            #     l2_pgd.backward(retain_graph=True)
            #     # l2_pgd = rampup(epoch, end=config.num_epochs) * kl_loss(input=F.log_softmax(y_pgd, dim=-1),
            #     #                                                         target=F.softmax(y_logits, dim=-1))
            #     # l2_pgd.backward(retain_graph=True)
            # pgd.restore(emb_name='word_embeddings')

            # PGD: run attack_num perturbation steps on the embeddings and
            # keep the step whose consistency loss is largest (worst case).
            pgd.backup_grad()
            l2_pgd_max = None  # assumes pgd.attack_num >= 1; otherwise .backward() below fails — TODO confirm
            l2_max = 0.0
            for t in range(pgd.attack_num):
                pgd.attack(is_first_attack=(t == 0), emb_name='word_embeddings')
                # Clear accumulated grads: this intermediate backward only
                # supplies the gradient direction for the next attack step.
                model.zero_grad()
                y_pgd = model(data)
                l2_pgd = rampup(10, end=config.num_epochs) * l2_loss(input=y_pgd, target=y_logits)
                l2_pgd.backward(retain_graph=True)
                # l2_pgd = rampup(epoch, end=config.num_epochs) * kl_loss(input=F.log_softmax(y_pgd, dim=-1),
                #                                                         target=F.softmax(y_logits, dim=-1))
                # l2_pgd.backward(retain_graph=True)
                if l2_pgd.item() >= l2_max:
                    l2_pgd_max = l2_pgd
                    l2_max = l2_pgd.item()
            # Restore the pre-attack gradients, then add the worst-case
            # consistency gradient on top of them.
            pgd.restore_grad()
            l2_pgd_max.backward(retain_graph=True)
            pgd.restore(emb_name='word_embeddings')

            # Supervised loss only when the batch contains labeled samples.
            if not len(label_idx) == 0:
                reg_loss = Regularization(model, config.weight_decay)
                ce = ce_loss(input=y_logits[label_idx], target=data[1][label_idx]) + reg_loss.forward()
                ce.backward(retain_graph=False)

            acc = acc_eval(torch.max(y_logits, dim=1)[1], data[1])  # mean accuracy of this batch
            train_acc_list.append(acc)

            # Single optimizer step over all accumulated gradients.
            optimizer.step()
            model.zero_grad()

            if batch_index % config.print_per_batch == 0:
                # Training loss is not tracked (train_loss_list is never
                # filled), so the logged value is hard-coded to 0.0.
                train_loss_mean = 0.0
                train_acc_mean = np.mean(train_acc_list)
                train_loss_list = []
                train_acc_list = []

                _, _, val_acc_mean, val_loss_mean = bert_evaluate(model, config, ce_loss, val)
                val_acc_list.append(val_acc_mean)
                val_loss_list.append(val_loss_mean)

                # Checkpoint on every new validation-accuracy best.
                if val_acc_mean > best_val_acc:
                    torch.save(model.state_dict(), config.model_name)
                    best_val_acc = val_acc_mean
                    improved_str = '*'
                else:
                    improved_str = ''

                time_dif = get_time_dif(start_time)
                msg = 'Iter: {0:>6}, Train Loss: {1:>7.2}, Train Acc: {2:>7.2%},' \
                      + ' Validation Loss: {3:>7.2}, Validation Acc: {4:>7.2%}, Time: {5} {6}'
                logfile(msg.format(batch_index, train_loss_mean, train_acc_mean,
                                   val_loss_mean, val_acc_mean, time_dif, improved_str))
    logfile('最佳val acc: {0:>7.2%}'.format(best_val_acc))
    return best_val_acc


def bert_vat(config):
    """Semi-supervised BERT training with FGM adversarial consistency (VAT-style).

    Per batch: accumulate a KL consistency loss between clean and
    Gaussian-noise logits, then a KL consistency loss against an FGM
    (one-step) adversarially-perturbed forward pass, then the supervised
    CE/SCE loss on the labeled subset, and take one Adam step. Unlabeled
    samples carry the sentinel label config.num_classes. Best checkpoint by
    validation accuracy is saved to config.model_name.

    Returns:
        float: best validation accuracy observed during training.
    """
    # Supervised loss: symmetric cross-entropy when enabled, else plain CE.
    if config.sce_flag:
        ce_loss = SCE(config)
    else:
        ce_loss = nn.CrossEntropyLoss()

    # NOTE(review): l2_loss is configured here but the active path below uses
    # kl_loss only — config.sce_fgm_flag currently has no effect in this
    # function. Confirm whether the MSE variant should be re-enabled.
    if config.sce_fgm_flag:
        l2_loss = SCE_FGM(config)
    else:
        l2_loss = nn.MSELoss()
    kl_loss = nn.KLDivLoss()

    model = BertClassifier(config)
    # model.load_state_dict(torch.load('./model.pt'))
    model.to(DEVICE)

    noise = AddGaussianNoise()

    logfile(
        f'vat 混合 一路noise一路无noise ce/sce/mse num_epochs:{config.num_epochs}, 带标签:{config.label_num}, 无标签:{config.unlabel_num}')

    start_time = time.time()
    time_dif = get_time_dif(start_time)
    logfile(f"Time usage:, {time_dif}")

    best_val_acc = 0.0
    val_loss_list = []
    val_acc_list = []

    train = np.load(file='dbpedia/train-32.npy', allow_pickle=True)
    val = np.load(file='dbpedia/val-32.npy', allow_pickle=True)

    # Deterministic shuffle so the labeled/unlabeled split is reproducible.
    rng = np.random.RandomState(246)  # 135 246
    indices = np.arange(len(train))
    rng.shuffle(indices)
    train = train[indices]

    # Inject symmetric label noise into an err_rate fraction of the labeled
    # samples. NOTE(review): unlike bert_pmgd this is not guarded by
    # `if config.err_rate > 0.0` — harmless when err_rate == 0 (empty
    # choice), but inconsistent with the sibling function.
    err = np.random.RandomState(456)  # 456
    err_idx = err.choice(config.label_num,
                         size=int(config.label_num * config.err_rate),
                         replace=False)
    for i in err_idx:
        train[i][1] += err.randint(low=1, high=config.num_classes)
        if train[i][1] >= config.num_classes:
            train[i][1] -= config.num_classes

    # Mark the last unlabel_num samples as unlabeled, then build the final
    # training set: label_num labeled + unlabel_num unlabeled, reshuffled.
    for item in train[-config.unlabel_num:]:
        item[1] = config.num_classes
    train = np.append(train[:config.label_num], train[-config.unlabel_num:], axis=0)
    rng.shuffle(train)

    # param_optimizer = list(model.named_parameters())
    # no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    # optimizer_grouped_parameters = [
    #     {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
    #     {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}]
    # optimizer = BertAdam(optimizer_grouped_parameters,
    #                      lr=1e-5,
    #                      warmup=0.05,
    #                      t_total=len(train) * config.num_epochs)
    optimizer = optim.Adam(model.parameters(), lr=1e-5)

    for epoch in range(config.num_epochs):
        logfile(f'Epoch:{epoch}')
        train_loss_list = []
        train_acc_list = []
        fgm = FGM(model)

        trainset = DatasetIter(data=train, device=DEVICE)
        train_loader = DataLoader(dataset=trainset, batch_size=config.batch_size, num_workers=0, shuffle=False)

        for batch_index, data in enumerate(train_loader):
            model.train()

            # Indices of labeled samples (unlabeled carry the sentinel label).
            label_idx = (data[1] != config.num_classes).nonzero()
            label_idx = label_idx.view(label_idx.shape[0])

            y_logits = model.forward(data)
            y_noise = model.forward(data, noise)

            # Consistency between clean and noisy logits, ramped up over the
            # whole run; the backward also supplies the gradient FGM uses to
            # build its perturbation.
            l2 = rampup(epoch, end=config.num_epochs) * kl_loss(input=F.log_softmax(y_noise, dim=-1), target=F.softmax(y_logits, dim=-1))
            l2.backward(retain_graph=True)
            # l2 = rampup(epoch, end=config.num_epochs) * l2_loss(input=y_noise, target=y_logits)
            # l2.backward(retain_graph=True)

            # One-step adversarial perturbation of the word embeddings.
            fgm.attack(epsilon=1.0, emb_name='word_embeddings')
            if not config.pifgm_flag:
                # Presumably: without the pi-FGM flag, drop the clean
                # consistency gradient so only the adversarial pass and the
                # supervised loss contribute — TODO confirm intent.
                model.zero_grad()

            y_fgm = model.forward(data)
            fgm.restore(emb_name='word_embeddings')

            l2_fgm = rampup(epoch, end=config.num_epochs) * kl_loss(input=F.log_softmax(y_fgm, dim=-1), target=F.softmax(y_logits, dim=-1))
            l2_fgm.backward(retain_graph=True)
            # l2_fgm = rampup(epoch, end=config.num_epochs) * l2_loss(input=y_fgm, target=y_logits)
            # l2_fgm.backward(retain_graph=True)

            # Supervised loss only when the batch contains labeled samples.
            if not len(label_idx) == 0:
                reg_loss = Regularization(model, config.weight_decay)
                ce = ce_loss(input=y_logits[label_idx], target=data[1][label_idx]) + reg_loss.forward()
                ce.backward(retain_graph=False)

            acc = acc_eval(torch.max(y_logits, dim=1)[1], data[1])  # mean accuracy of this batch
            train_acc_list.append(acc)

            # Single optimizer step over all accumulated gradients.
            optimizer.step()
            model.zero_grad()

            if batch_index % config.print_per_batch == 0:
                # Training loss is not tracked; the logged value is fixed 0.0.
                train_loss_mean = 0.0
                train_acc_mean = np.mean(train_acc_list)
                train_loss_list = []
                train_acc_list = []

                _, _, val_acc_mean, val_loss_mean = bert_evaluate(model, config, ce_loss, val)
                val_acc_list.append(val_acc_mean)
                val_loss_list.append(val_loss_mean)

                # Checkpoint on every new validation-accuracy best.
                if val_acc_mean > best_val_acc:
                    torch.save(model.state_dict(), config.model_name)
                    best_val_acc = val_acc_mean
                    improved_str = '*'
                else:
                    improved_str = ''

                time_dif = get_time_dif(start_time)
                msg = 'Iter: {0:>6}, Train Loss: {1:>7.2}, Train Acc: {2:>7.2%},' \
                      + ' Validation Loss: {3:>7.2}, Validation Acc: {4:>7.2%}, Time: {5} {6}'
                logfile(msg.format(batch_index, train_loss_mean, train_acc_mean,
                                   val_loss_mean, val_acc_mean, time_dif, improved_str))
    logfile('最佳val acc: {0:>7.2%}'.format(best_val_acc))
    return best_val_acc


def bert_pi(config):
    """Semi-supervised BERT training, Pi-model style.

    Per batch: accumulate an MSE consistency loss between clean logits and
    logits from a Gaussian-noise forward pass (weight ramped up by epoch),
    plus CE with L2 regularization on the labeled subset, then take one Adam
    step. Unlabeled samples carry the sentinel label config.num_classes.
    Best checkpoint by validation accuracy is saved to config.model_name.

    Returns:
        float: best validation accuracy observed during training.
    """
    ce_loss = nn.CrossEntropyLoss()
    l2_loss = nn.MSELoss()
    # l2_loss = nn.MSELoss()
    # kl_loss = nn.KLDivLoss()

    model = BertClassifier(config)
    # model.load_state_dict(torch.load('./model.pt'))
    model.to(DEVICE)

    noise = AddGaussianNoise()

    logfile(
        f'pi 混合 1路noise ce+mse num_epochs:{config.num_epochs}, 带标签:{config.label_num}, 无标签:{config.unlabel_num}')

    start_time = time.time()
    time_dif = get_time_dif(start_time)
    logfile(f"Time usage:, {time_dif}")

    best_val_acc = 0.0
    val_loss_list = []
    val_acc_list = []

    train = np.load(file='dbpedia/train-32.npy', allow_pickle=True)
    val = np.load(file='dbpedia/val-32.npy', allow_pickle=True)

    # Deterministic shuffle so the labeled/unlabeled split is reproducible.
    rng = np.random.RandomState(246)  # 135 246
    indices = np.arange(len(train))
    rng.shuffle(indices)
    train = train[indices]

    # Inject symmetric label noise into an err_rate fraction of the labeled
    # samples (fixed seed); wrap-around keeps labels in [0, num_classes).
    err = np.random.RandomState(456)  # 456
    err_idx = err.choice(config.label_num,
                         size=int(config.label_num * config.err_rate),
                         replace=False)
    for i in err_idx:
        train[i][1] += err.randint(low=1, high=config.num_classes)
        if train[i][1] >= config.num_classes:
            train[i][1] -= config.num_classes

    # Mark the last unlabel_num samples as unlabeled, then build the final
    # training set: label_num labeled + unlabel_num unlabeled, reshuffled.
    for item in train[-config.unlabel_num:]:
        item[1] = config.num_classes
    train = np.append(train[:config.label_num], train[-config.unlabel_num:], axis=0)
    rng.shuffle(train)


    # param_optimizer = list(model.named_parameters())
    # no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    # optimizer_grouped_parameters = [
    #     {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
    #     {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}]
    # optimizer = BertAdam(optimizer_grouped_parameters,
    #                      lr=1e-5,
    #                      warmup=0.05,
    #                      t_total=len(train) * config.num_epochs)
    optimizer = optim.Adam(model.parameters(), lr=1e-5)

    for epoch in range(config.num_epochs):
        logfile(f'Epoch:{epoch}')
        train_loss_list = []
        train_acc_list = []

        trainset = DatasetIter(data=train, device=DEVICE)
        # NOTE(review): shuffle=True here, unlike the other trainers in this
        # file which use shuffle=False — confirm the difference is intended.
        train_loader = DataLoader(dataset=trainset, batch_size=config.batch_size, num_workers=0, shuffle=True)

        for batch_index, data in enumerate(train_loader):
            model.train()

            # Indices of labeled samples (unlabeled carry the sentinel label).
            label_idx = (data[1] != config.num_classes).nonzero()
            label_idx = label_idx.view(label_idx.shape[0])

            y_logits = model.forward(data)
            y_noise = model.forward(data, noise)

            # Consistency between clean and noisy logits, ramped up by epoch.
            l2 = rampup(epoch, end=config.num_epochs) * l2_loss(input=y_noise, target=y_logits)
            l2.backward(retain_graph=True)

            # Supervised loss only when the batch contains labeled samples.
            if not len(label_idx) == 0:
                reg_loss = Regularization(model, config.weight_decay)
                ce = ce_loss(input=y_logits[label_idx], target=data[1][label_idx]) + reg_loss.forward()
                ce.backward(retain_graph=False)

            acc = acc_eval(torch.max(y_logits, dim=1)[1], data[1])  # mean accuracy of this batch
            train_acc_list.append(acc)

            # Single optimizer step over all accumulated gradients.
            optimizer.step()
            model.zero_grad()

            if batch_index % config.print_per_batch == 0:
                # Training loss is not tracked; the logged value is fixed 0.0.
                train_loss_mean = 0.0
                train_acc_mean = np.mean(train_acc_list)
                train_loss_list = []
                train_acc_list = []

                _, _, val_acc_mean, val_loss_mean = bert_evaluate(model, config, ce_loss, val)
                val_acc_list.append(val_acc_mean)
                val_loss_list.append(val_loss_mean)

                # Checkpoint on every new validation-accuracy best.
                if val_acc_mean > best_val_acc:
                    torch.save(model.state_dict(), config.model_name)
                    best_val_acc = val_acc_mean
                    improved_str = '*'
                else:
                    improved_str = ''

                time_dif = get_time_dif(start_time)
                msg = 'Iter: {0:>6}, Train Loss: {1:>7.2}, Train Acc: {2:>7.2%},' \
                      + ' Validation Loss: {3:>7.2}, Validation Acc: {4:>7.2%}, Time: {5} {6}'
                logfile(msg.format(batch_index, train_loss_mean, train_acc_mean,
                                   val_loss_mean, val_acc_mean, time_dif, improved_str))
    logfile('最佳val acc: {0:>7.2%}'.format(best_val_acc))
    return best_val_acc


def bert_normal(config):
    """Fully-supervised baseline: plain cross-entropy on labeled data only.

    Uses the first config.label_num samples (after the deterministic shuffle
    and optional label-noise injection); no unlabeled data or consistency
    loss. Best checkpoint by validation accuracy is saved to
    config.model_name.

    Returns:
        float: best validation accuracy observed during training.
    """
    ce_loss = nn.CrossEntropyLoss()

    model = BertClassifier(config)
    # model.load_state_dict(torch.load('./model.pt'))
    model.to(DEVICE)

    logfile(
        f'全监督 ce num_epochs:{config.num_epochs}, 带标签:{config.label_num}, 无标签:{config.unlabel_num}')

    start_time = time.time()
    time_dif = get_time_dif(start_time)
    logfile(f"Time usage:, {time_dif}")

    best_val_acc = 0.0
    val_loss_list = []
    val_acc_list = []

    train = np.load(file='dbpedia/train-32.npy', allow_pickle=True)
    val = np.load(file='dbpedia/val-32.npy', allow_pickle=True)

    # Deterministic shuffle for a reproducible labeled subset.
    rng = np.random.RandomState(246)  # 135 246
    indices = np.arange(len(train))
    rng.shuffle(indices)
    train = train[indices]

    # Inject symmetric label noise into an err_rate fraction of the labeled
    # samples (fixed seed); wrap-around keeps labels in [0, num_classes).
    err = np.random.RandomState(456)  # 456
    err_idx = err.choice(config.label_num,
                         size=int(config.label_num * config.err_rate),
                         replace=False)
    for i in err_idx:
        train[i][1] += err.randint(low=1, high=config.num_classes)
        if train[i][1] >= config.num_classes:
            train[i][1] -= config.num_classes

    # Keep only the labeled portion for fully-supervised training.
    train = train[:config.label_num]

    # param_optimizer = list(model.named_parameters())
    # no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    # optimizer_grouped_parameters = [
    #     {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
    #     {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}]
    # optimizer = BertAdam(optimizer_grouped_parameters,
    #                      lr=1e-5,
    #                      warmup=0.05,
    #                      t_total=len(train) * config.num_epochs)
    optimizer = optim.Adam(model.parameters(), lr=1e-5)

    for epoch in range(config.num_epochs):
        logfile(f'Epoch:{epoch}')
        train_loss_list = []
        train_acc_list = []

        trainset = DatasetIter(data=train, device=DEVICE)
        train_loader = DataLoader(dataset=trainset, batch_size=config.batch_size, num_workers=0, shuffle=False)

        for batch_index, data in enumerate(train_loader):
            model.train()

            y_logits = model.forward(data)
            # reg_loss = Regularization(model, config.weight_decay)
            # ce = ce_loss(input=y_logits, target=data[1]) + reg_loss.forward()
            ce = ce_loss(input=y_logits, target=data[1])
            ce.backward(retain_graph=False)

            acc = acc_eval(torch.max(y_logits, dim=1)[1], data[1])  # mean accuracy of this batch
            train_acc_list.append(acc)

            optimizer.step()
            model.zero_grad()

            if batch_index % config.print_per_batch == 0:
                # Training loss is not tracked; the logged value is fixed 0.0.
                train_loss_mean = 0.0
                train_acc_mean = np.mean(train_acc_list)
                train_loss_list = []
                train_acc_list = []

                _, _, val_acc_mean, val_loss_mean = bert_evaluate(model, config, ce_loss, val)
                val_acc_list.append(val_acc_mean)
                val_loss_list.append(val_loss_mean)

                # Checkpoint on every new validation-accuracy best.
                if val_acc_mean > best_val_acc:
                    torch.save(model.state_dict(), config.model_name)
                    best_val_acc = val_acc_mean
                    improved_str = '*'
                else:
                    improved_str = ''

                time_dif = get_time_dif(start_time)
                msg = 'Iter: {0:>6}, Train Loss: {1:>7.2}, Train Acc: {2:>7.2%},' \
                      + ' Validation Loss: {3:>7.2}, Validation Acc: {4:>7.2%}, Time: {5} {6}'
                logfile(msg.format(batch_index, train_loss_mean, train_acc_mean,
                                   val_loss_mean, val_acc_mean, time_dif, improved_str))
    logfile('最佳val acc: {0:>7.2%}'.format(best_val_acc))
    return best_val_acc


def main_bert(config):
    """Train with the PMGD scheme, then evaluate the saved best checkpoint.

    Alternative trainers (commented) share the same signature and return
    value; swap the active line to switch schemes.
    """
    # val_acc = bert_pi(config)
    # val_acc = bert_vat(config)
    # val_acc = bert_normal(config)
    val_acc = bert_pmgd(config)
    test_acc = bert_test(config)
    return val_acc, test_acc


if __name__ == '__main__':
    # Sweep the label-noise rate; each run trains from scratch with a fresh
    # config, then the summary of all runs is logged at the end.
    main_val = []
    main_test = []
    for err_rate in (0.0, 0.1, 0.2, 0.3, 0.4):
        run_config = Config()
        run_config.label_num = 1000
        run_config.unlabel_num = 1000
        run_config.num_epochs = 5
        run_config.batch_size = 8
        run_config.pifgm_flag = True
        run_config.sce_flag = True
        run_config.sce_fgm_flag = True
        run_config.err_rate = err_rate
        valacc, testacc = main_bert(run_config)
        main_val.append(valacc)
        main_test.append(testacc)
    texts = f'summary...\n' \
            f'main_val:{main_val}\n' \
            f'main_test:{main_test}\n'
    logfile(texts)