import datetime
import os

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from matplotlib import pyplot as plt
from torch.nn import KLDivLoss
from torch.utils.data import DataLoader, TensorDataset
from tqdm import tqdm
from transformers import AdamW

from dataloading import collate_fn
from evaluate import evaluate
from loss import contrastive_ent, self_entropy
from relabel import get_prototypes_train_relabel, get_relabel_proto, get_o_prototypes_train_relabel, check_labels_batch

def raw_labels_statistic(raw_relabels, labels_list):
    """Print a per-label count of the positive label ids in *raw_relabels*.

    The printed dict has one entry per label in *labels_list* (keyed by
    index 0..len-1); label id 0 is never counted, so its entry stays 0.
    """
    counts = {label_id: 0 for label_id in range(len(labels_list))}
    for row in raw_relabels:
        for value in row:
            label_id = value.item()
            if label_id > 0:
                counts[label_id] += 1
    print(counts)


def get_raw_label_weight(
    args,
    prev_feature,
    old_proto_features,
    raw_relabels,
    labels_list,
    task_id
):
    """Compute prototype-similarity probabilities and KD weights per token.

    Each token feature is compared (dot product) against every old-class
    prototype; the softmax over prototypes gives a probability that the token
    belongs to each old class.  Tokens whose raw relabel is a previous-task
    class (id below the current task's new-class range, and >= 0) receive a
    knowledge-distillation weight equal to the probability assigned to their
    own label; all other tokens get weight 0.

    Args:
        args: namespace providing ``device`` and ``per_types`` (list of class
            counts per task).
        prev_feature: token features from the previous encoder,
            shape (batch, seq_len, dim) — assumed 3-D; TODO confirm.
        old_proto_features: prototype features, shape (num_protos, dim).
        raw_relabels: raw pseudo-labels, shape (batch, seq_len).
        labels_list: full label vocabulary seen so far.
        task_id: index of the current task into ``args.per_types``.

    Returns:
        prob: (batch, seq_len, num_protos) prototype probabilities.
        kd_weight: (batch, seq_len) per-token KD weights.
        condition: (batch, seq_len) bool mask of old-class tokens.
        idx: tuple of flat indices (from ``torch.where``) of old-class tokens.
    """
    prev_feature_flatten = prev_feature.view(-1, prev_feature.size(-1))
    labels_flatten = raw_relabels.view(-1)
    sim = torch.matmul(prev_feature_flatten, old_proto_features.T)
    # F.softmax is the numerically stable form of exp(sim) / sum(exp(sim)):
    # the original explicit exp overflowed to inf (yielding NaN
    # probabilities) for large similarity values.
    prob = F.softmax(sim, dim=-1)
    kd_weight = torch.zeros(prob.shape[0]).to(args.device)
    # Old-task classes occupy ids [0, len(labels_list) - per_types[task_id]).
    condition = (labels_flatten < len(labels_list) - args.per_types[task_id]) & (labels_flatten >= 0)

    idx = torch.where(condition)
    labels_flatten_no_e_now = labels_flatten[idx]
    prob_idx = prob[idx]

    # Weight of each selected token = probability assigned to its own label.
    kd_w_idx = prob_idx[torch.arange(prob_idx.size(0)), labels_flatten_no_e_now]
    kd_weight[idx] = kd_w_idx
    kd_weight = kd_weight.view(raw_relabels.shape[0], -1)
    prob = prob.view(prev_feature.shape[0], prev_feature.shape[1], -1)
    condition = condition.view(prev_feature.shape[0], prev_feature.shape[1])
    return prob, kd_weight, condition, idx





def train_current_task(
        args,
        encoder,
        prev_encoder,
        classifier,
        prev_classifier,
        train_dataset,
        dev_dataset,
        test_dataset,
        labels_list,
        output_dir,
        task_id
):
    """Train ``encoder``/``classifier`` on the current continual-learning task.

    For ``task_id > 0`` the frozen previous-task model (``prev_encoder`` /
    ``prev_classifier``) supplies pseudo-labels ("relabels"), prototype
    features and soft outputs used for knowledge distillation, contrastive,
    self-entropy and output-similarity losses.  After every epoch the model is
    evaluated on ``dev_dataset``; the best checkpoint by micro-F1 (only from
    epoch 5 onwards) is saved under ``output_dir``, training early-stops after
    ``args.early_stop`` epochs without improvement, and the best model is
    finally evaluated on ``test_dataset``.

    Returns:
        (micro_f1, macro_f1) of the final test evaluation.
    """

    # Raw labels that have not been relabeled.
    if args.use_utils_dataset:
        dataloader = DataLoader(
            train_dataset,
            batch_size=args.batch_size,
            shuffle=True,
            collate_fn=collate_fn,
            drop_last=False)
    else:
        dataloader = DataLoader(
            train_dataset,
            batch_size=args.batch_size,
            shuffle=True,
            drop_last=False,
        )

    # Incremental task: pre-compute the previous model's relabels, prototypes
    # and soft outputs once, so the training loop can read them from the batch.
    if task_id > 0 and args.use_utils_dataset == False:
        print("==============getting training raw relabels and proto==============")
        old_proto_types, old_proto_features, ent_num, raw_relabels = get_relabel_proto(
            args=args,
            prev_encoder=prev_encoder,
            prev_classifier=prev_classifier,
            train_dataset=train_dataset,
            step_id=task_id,
            labels_list=labels_list)
        # print("raw_labels_statistic")
        # raw_labels_statistic(raw_relabels, labels_list)
        if args.dev_relabel:
            print("==============getting dev raw relabels and proto==============")
            dev_old_proto_types, dev_old_proto_features, dev_ent_num, dev_raw_relabels = get_relabel_proto(
                args=args,
                prev_encoder=prev_encoder,
                prev_classifier=prev_classifier,
                train_dataset=dev_dataset,
                step_id=task_id,
                labels_list=labels_list)

            # Rebuild the dev dataset with the raw relabels substituted for
            # the original label ids (column 3); column 4 keeps bi-labels.
            all_input_ids = [data[0] for data in dev_dataset]
            all_input_mask = [data[1] for data in dev_dataset]
            all_segment_ids = [data[2] for data in dev_dataset]
            all_bi_labels = [data[4] for data in dev_dataset]
            all_input_ids_tensor = torch.stack(all_input_ids)
            all_input_mask_tensor = torch.stack(all_input_mask)
            all_segment_ids_tensor = torch.stack(all_segment_ids)
            all_label_ids = dev_raw_relabels
            all_bi_labels = torch.stack(all_bi_labels)

            dev_dataset = TensorDataset(all_input_ids_tensor, all_input_mask_tensor, all_segment_ids_tensor, all_label_ids, all_bi_labels)
            # print("raw_labels_statistic")
            # raw_labels_statistic(dev_raw_relabels, labels_list)

            if args.dev_relabel_cooked:
                print("==============getting dev cooked relabels and proto==============")
                dev_dataloader = DataLoader(dev_dataset,
                                        batch_size=128,
                                        shuffle=False,
                                        drop_last=False)
                dev_cooked_relabels_list = []

                bar = tqdm(dev_dataloader, desc="getting dev cooked relabels")
                for step, batch in enumerate(bar):
                    for i in range(len(batch)):
                        batch[i] = batch[i].to(args.device)
                    with torch.no_grad():
                        dev_prev_hidden, dev_prev_feature = prev_encoder.forward(
                            input_ids=batch[0],
                            attention_mask=batch[1])
                        dev_prev_output = prev_classifier.forward(
                            hidden=dev_prev_hidden,
                            labels=None)[0]
                        dev_prev_raw_prob = F.softmax(dev_prev_output, dim=-1)

                        dev_raw_relabels = batch[3]
                        dev_other_types, dev_other_features, dev_other_num = get_o_prototypes_train_relabel(
                            args,
                            dev_prev_hidden,
                            dev_prev_feature,
                            dev_raw_relabels,
                            prev_encoder)
                        dev_old_proto = torch.cat([dev_other_features, dev_old_proto_features], dim=0)

                        # NOTE(review): get_raw_label_weight requires a
                        # task_id argument but this call omits it, so this
                        # branch raises TypeError when args.dev_relabel_cooked
                        # is enabled — confirm and pass task_id here.
                        dev_prob_weight, dev_kd_weight, dev_relabel_mask, dev_old_other_idx = get_raw_label_weight(
                            args,
                            dev_prev_feature,
                            dev_old_proto,
                            dev_raw_relabels,
                            labels_list)

                        # "Cooked" relabels: argmax of prototype-weighted old
                        # probabilities, applied only where the mask is True.
                        dev_prev_prob = dev_prob_weight * dev_prev_raw_prob
                        dev_cook_relabels = torch.max(dev_prev_prob, dim=-1)[1]
                        dev_relabels = torch.where(dev_relabel_mask, dev_cook_relabels, dev_raw_relabels)
                        dev_cooked_relabels_list.append(dev_relabels.detach().cpu())


                dev_cooked_relabels = torch.cat(dev_cooked_relabels_list, dim=0)
                all_input_ids = [data[0] for data in dev_dataset]
                all_input_mask = [data[1] for data in dev_dataset]
                all_segment_ids = [data[2] for data in dev_dataset]
                all_bi_labels = [data[4] for data in dev_dataset]
                all_input_ids_tensor = torch.stack(all_input_ids)
                all_input_mask_tensor = torch.stack(all_input_mask)
                all_segment_ids_tensor = torch.stack(all_segment_ids)
                all_label_ids = dev_cooked_relabels
                all_bi_labels = torch.stack(all_bi_labels)
                dev_dataset = TensorDataset(all_input_ids_tensor, all_input_mask_tensor, all_segment_ids_tensor,
                                            all_label_ids, all_bi_labels)
                # print("cooked_labels_statistic")
                # raw_labels_statistic(dev_cooked_relabels, labels_list)

        # Cache the previous model's probabilities, features and hiddens for
        # the whole training set (deterministic order: shuffle=False).
        dataloader = DataLoader(train_dataset,
                                batch_size=128,
                                shuffle=False,
                                drop_last=False)
        print("==============getting training old output and probablity==============")
        bar = tqdm(dataloader, desc="Old Training Probability")
        prev_linear_output = []
        prev_linear_output_wo_softmax = []
        prev_features, prev_hiddens = [], []
        all_input_ids, all_input_mask, all_segment_ids, all_label_ids = [], [], [], []
        for step, batch in enumerate(bar):
            for i in range(len(batch)):
                batch[i] = batch[i].to(args.device)
            with torch.no_grad():
                prev_hidden, prev_feature = prev_encoder.forward(
                    input_ids=batch[0],
                    attention_mask=batch[1])
                prev_output = prev_classifier.forward(
                    hidden=prev_hidden,
                    labels=None)[0]
                prev_output_prob = F.softmax(prev_output, dim=-1)
                prev_linear_output.append(prev_output_prob)
                prev_linear_output_wo_softmax.append(prev_output)
                prev_features.append(prev_feature)
                prev_hiddens.append(prev_hidden)
                all_label_ids.append(batch[3])
                all_input_ids.append(batch[0])
                all_input_mask.append(batch[1])
                all_segment_ids.append(batch[2])

        torch.cuda.empty_cache()
        all_input_ids = torch.cat(all_input_ids, dim=0).to(args.device)
        all_input_mask = torch.cat(all_input_mask, dim=0).to(args.device)
        all_segment_ids = torch.cat(all_segment_ids, dim=0).to(args.device)
        all_label_ids = torch.cat(all_label_ids, dim=0).to(args.device)
        all_linear_output = torch.cat(prev_linear_output, dim=0).to(args.device)
        all_linear_output_wo_softmax = torch.cat(prev_linear_output_wo_softmax, dim=0).to(args.device)
        all_prev_features = torch.cat(prev_features, dim=0).to(args.device)
        all_prev_hiddens = torch.cat(prev_hiddens, dim=0).to(args.device)
        all_raw_relabels = raw_relabels.to(args.device)

        # Batches now carry the cached previous-model tensors at indices 4-8.
        train_dataset_with_old_prob = TensorDataset(
            all_input_ids,
            all_input_mask,
            all_segment_ids,
            all_label_ids,
            all_linear_output,
            all_prev_features,
            all_prev_hiddens,
            all_raw_relabels,
            all_linear_output_wo_softmax
            )
        print("len of train: ", train_dataset_with_old_prob.__len__())
        dataloader = DataLoader(train_dataset_with_old_prob,
                                batch_size=args.batch_size,
                                shuffle=True,
                                drop_last=False)

    proto_features = None

    # Separate learning rates for encoder and classifier.
    optimizer = AdamW(
        [
            {"params": encoder.parameters(), "lr": args.encoder_lr},
            {"params": classifier.parameters(), "lr": args.classifier_lr},
        ],
        eps=args.adam_eps,
        no_deprecation_warning=True
    )
    loss_func = KLDivLoss(reduction="none")
    LogSoftmax = nn.LogSoftmax(dim=-1)

    # Initialize the new classifier head from the previous task's weights
    # (only once old classes exist beyond the first task's label set).
    if len(labels_list) - args.per_types[0] > 1:
        classifier.weight_copy(
            prev_classifier=prev_classifier,
            task_id=task_id
        )
        print("=================weight copy=================")



    best_f1 = 0
    best_f1_mar = 0
    best_epoch = -1
    loss_num = 0
    loss_total = 0
    # Loss components default to 0 so the progress-bar format works even when
    # their branches are disabled.
    self_loss, loss_ce_re, loss_kd, con_loss, sim_loss = \
        torch.tensor(0), torch.tensor(0), torch.tensor(0), torch.tensor(0), torch.tensor(0)
    # NOTE(review): these are aliases of the live models, not snapshots; they
    # keep training after the best epoch.  The state_dicts saved to disk are
    # the actual best weights — confirm whether a deepcopy was intended.
    best_encoder = encoder
    best_classifier = classifier
    best_results = None
    no_increace_num = 0
    for epoch in range(args.current_epoch):
        print("================Model Training Current: epoch {}================".format(epoch))

        encoder.train()
        classifier.train()
        bar = tqdm(dataloader, desc="Current Epoch {}".format(epoch))

        for step, batch in enumerate(bar):
            batch = list(batch)
            for i in range(len(batch)):
                batch[i] = batch[i].to(args.device)

            # The encoder output has already been normalized.
            # print("batch: ", batch[0].shape, batch[1].shape, batch[2].shape, batch[3].shape)

            hidden, feature = encoder.forward(
                            input_ids=batch[0],
                            attention_mask=batch[1])

            ce_loss, current_output = classifier.forward(
                hidden=hidden,
                labels=batch[3])

            if args.use_con:
                con_loss = contrastive_ent(args, batch[3], feature)

            # First-task loss; overwritten below when distillation applies.
            loss = ce_loss + con_loss * args.con_weight

            if task_id > 0 and args.use_distill:
                with torch.no_grad():
                    prev_encoder.eval()
                    prev_classifier.eval()

                    if args.use_utils_dataset:
                        # Recompute previous-model outputs on the fly.
                        prev_hidden, prev_feature = prev_encoder.forward(
                            input_ids=batch[0],
                            attention_mask=batch[1])
                        prev_output = prev_classifier.forward(
                            hidden=prev_hidden,
                            labels=None)[0]
                        prev_raw_prob = F.softmax(prev_output, dim=-1)
                        pred = torch.argmax(prev_raw_prob, dim=-1)
                        raw_relabels = check_labels_batch(
                                            pred,
                                            batch[3])
                    else:
                        # Read the cached previous-model tensors from the batch.
                        prev_raw_prob = batch[4]
                        prev_feature = batch[5]
                        prev_hidden = batch[6]
                        raw_relabels = batch[7]
                        prev_linear_output_wo_softmax = batch[8]
                    other_types, other_features, other_num = get_o_prototypes_train_relabel(
                        args,
                        prev_hidden,
                        prev_feature,
                        raw_relabels,
                        prev_encoder)

                    old_proto = torch.cat([other_features, old_proto_features], dim=0)

                    prob_weight, kd_weight, relabel_mask, old_other_idx = get_raw_label_weight(
                        args,
                        prev_feature,
                        old_proto,
                        raw_relabels,
                        labels_list,
                        task_id
                    )
                    # for i in range(prob_weight.shape[0]):
                    #     for j in range(prob_weight.shape[1]):
                    #         pw = prob_weight[i][j]
                    #         mas = torch.where(pw != 0)
                    #         pw_mas = pw[mas]
                    #         pw_mean = torch.mean(pw_mas)
                    #         prob_weight[i][j] = torch.where(pw != 0, pw, pw_mean)

                    # Cooked relabels: prototype-weighted argmax where the
                    # mask selects old-class tokens, raw relabels elsewhere.
                    prev_prob = prob_weight * prev_raw_prob
                    cook_relabels = torch.max(prev_prob, dim=-1)[1]
                    relabels = torch.where(relabel_mask, cook_relabels, raw_relabels)
                    prev_raw_prob = prev_raw_prob.view(-1, prev_raw_prob.shape[-1])[old_other_idx]

                if args.use_con:
                    if args.use_con_pseudo:
                        con_loss = contrastive_ent(args, relabels, feature)
                    else:
                        con_loss = contrastive_ent(args, raw_relabels, feature)



                if args.use_self_entropy:
                    self_loss = self_entropy(current_output)

                if args.use_kd:
                    # Distill only over old-class logits (current head minus
                    # the current task's new types) at old/other token positions.
                    current_output_old = \
                        current_output.view(-1, current_output.shape[-1])[old_other_idx]
                    current_output_prob_old = \
                        LogSoftmax(current_output_old[:, :current_output_old.shape[-1] - args.per_types[task_id]])
                    loss_kd_list = loss_func(
                        current_output_prob_old,
                        prev_raw_prob).sum(dim=-1)
                    if args.use_kd_weight:
                        kd_weight = kd_weight.view(-1)[old_other_idx]

                        loss_kd = torch.mean(loss_kd_list * kd_weight)
                    else:
                        loss_kd = torch.mean(loss_kd_list)

                    if args.use_output_sim:
                        # Cosine similarity between current old-class logits
                        # and the cached pre-softmax previous outputs.
                        current_output_pre_types = current_output[:, :, :current_output.shape[-1] - args.per_types[task_id]]
                        sim_loss = 1 - F.cosine_similarity(current_output_pre_types, prev_linear_output_wo_softmax)
                        sim_loss = torch.mean(sim_loss)
                        loss_kd = args.lambda3 * loss_kd + (1 - args.lambda3) * sim_loss



                ce_loss_fct = nn.CrossEntropyLoss()
                labels_flat = batch[3].view(-1)


                # Recompute CE against pseudo (cooked) or raw relabels.
                if args.use_ce_pseudo:
                    ce_loss = ce_loss_fct(
                        current_output.view(-1, current_output.shape[-1]), relabels.view(-1))

                else:
                    ce_loss = ce_loss_fct(
                        current_output.view(-1, current_output.shape[-1]), raw_relabels.view(-1))


                if args.use_loss_new:
                    # Extra CE term restricted to new-task tokens (optionally
                    # including the "O"/other label 0).
                    if args.use_old_other_losss_new:
                        old_mask = (labels_flat >= len(labels_list) - args.per_types[task_id]) | (labels_flat == 0)


                    else:
                        old_mask = labels_flat >= len(labels_list) - args.per_types[task_id]
                    new_idx = torch.where(old_mask)[0]
                    ce_loss_new = ce_loss_fct(
                        current_output.view(-1, current_output.shape[-1])[new_idx],
                        raw_relabels.view(-1)[new_idx])
                    # CE over an empty selection is NaN — skip it in that case.
                    if not torch.isnan(ce_loss_new):
                        ce_loss = ce_loss + args.lambda4 * ce_loss_new



                loss = ce_loss + \
                       args.con_weight * con_loss + \
                       args.self_weight * self_loss + \
                       args.kd_weight * task_id * loss_kd




            # NOTE(review): loss_total accumulates the graph-attached loss
            # tensor, which keeps every step's autograd graph alive for the
            # epoch; `loss_total += loss.item()` (or .detach()) would avoid
            # the memory growth — confirm and fix.
            loss_total += loss
            loss_num += 1
            bar.set_description(
                "mean: {:.4f}, loss: {:.4f}, ce: {:.4f}, kd: {:.4f}, sim: {:.4f}, con: {:.4f}, s_e: {:.4f}".
                format(loss_total.item() / loss_num, loss.item(), ce_loss.item(), loss_kd.item(),
                       sim_loss.item(), con_loss.item(), self_loss.item()))
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()



        # print("-----------train evaluate------------")
        #
        # macro_f1, micro_f1, preds_list, preds, out_label_ids, results, loss_tup = evaluate(
        #     args, encoder, classifier, train_dataset, labels_list=labels_list, task_id=task_id,
        #     proto_features=proto_features)
        # with open(args.report_file, "a") as f:
        #     f.write(
        #         "now_dev: epoch: {}, macro_f1: {:.4f}, micro_f1: {:.4f}, loss_tuple: {}\n".format(epoch, macro_f1["f1"],
        #                                                                                           micro_f1["f1"],
        #                                                                                           loss_tup))

        if args.training_evaluate:
            print("-----------training evaluate------------")

            macro_f1, micro_f1, preds_list, preds, out_label_ids, results, loss_tup = evaluate(
                args, encoder, classifier, train_dataset, labels_list=labels_list, task_id=task_id,
                proto_features=proto_features, mode="train")
            with open(args.report_file, "a") as f:
                f.write(
                    "now_train: epoch: {}, macro_f1: {:.4f}, micro_f1: {:.4f}, loss_tuple: {}\n".format(epoch, macro_f1["f1"],
                                                                                                      micro_f1["f1"],
                                                                                                      loss_tup))

        print("-----------dev evaluate------------")

        macro_f1, micro_f1, preds_list, preds, out_label_ids, results, loss_tup = evaluate(
            args, encoder, classifier, dev_dataset, labels_list=labels_list, task_id=task_id, proto_features=proto_features, mode="dev")
        with open(args.report_file, "a") as f:
            f.write("now_dev: epoch: {}, macro_f1: {:.4f}, micro_f1: {:.4f}, loss_tuple: {}\n".format(epoch, macro_f1["f1"], micro_f1["f1"], loss_tup))

        # if prev_encoder is not None:
        #     print("-----------prev_dev evaluate------------")
        #     macro_f1, micro_f1, preds_list, preds, out_label_ids, results, loss_tup = evaluate(
        #         args, encoder, classifier, prev_dev_dataset, labels_list=labels_list, task_id=task_id-1, proto_features=proto_features)
        #     with open(args.report_file, "a") as f:
        #         f.write("pre_test: epoch: {}, macro_f1: {:.4f}, micro_f1: {:.4f},, loss_tuple: {}\n".format(epoch, macro_f1["f1"],
        #                                                                                   micro_f1["f1"], loss_tup))

            # macro_f1, micro_f1, preds_list, preds, out_label_ids = evaluate(args, prev_encoder, prev_classifier,
            #                                                                 prev_dev_dataset)
            # macro_f1, micro_f1, preds_list, preds, out_label_ids = evaluate(args, prev_encoder, classifier,
            #                                                                 prev_dev_dataset)
            # macro_f1, micro_f1, preds_list, preds, out_label_ids = evaluate(args, encoder, prev_classifier,
            #                                                                 prev_dev_dataset)


        # print("-----------test evaluate------------")
        #
        # macro_f1, micro_f1, preds_list, preds, out_label_ids, results, loss_tup = evaluate(
        #     args, encoder, classifier, test_dataset, labels_list=labels_list, task_id=task_id, proto_features=proto_features)
        # with open(args.report_file, "a") as f:
        #     f.write("test: epoch: {}, macro_f1: {:.4f}, micro_f1: {:.4f}, loss_tuple: {}\n".format(epoch, macro_f1["f1"], micro_f1["f1"], loss_tup))
        #     f.write("\n")
        # NOTE(review): checkpoints are only saved from epoch 5 onwards; if
        # early stopping triggers before that, best_results stays None and
        # the untrained-best aliases are used for the final test — confirm
        # this gate is intended.
        if micro_f1["f1"] >= best_f1 and epoch >= 5:
            best_encoder = encoder
            best_classifier = classifier
            if not os.path.exists(output_dir):
                os.makedirs(output_dir)
            torch.save(encoder.state_dict(), output_dir + "/encoder.pt")
            print("===============save encoder.pt==================")
            torch.save(classifier.state_dict(), output_dir + "/classifier.pt")
            print("==============save classifier.pt================")
            best_f1 = micro_f1["f1"]
            best_f1_mar = macro_f1["f1"]
            best_tup = loss_tup
            best_epoch = epoch
            best_results = results
            print("best_f1 = ", best_f1)
            no_increace_num = 0
        else:
            no_increace_num += 1
        if no_increace_num >= args.early_stop:
            print("===============early stop==================")
            break
    # Final evaluation on the held-out test set with the best model.
    macro_f1, micro_f1, preds_list, preds, out_label_ids, results, loss_tup = evaluate(
        args, best_encoder, best_classifier, test_dataset, labels_list=labels_list, task_id=task_id, proto_features=proto_features, tsne=False, mode="test")
    with open(args.report_file, "a") as f:
        f.write("task: {} best_micro_f1: {:.4f}\n".format(task_id, best_f1))
        f.write(str(best_results))
        f.write("\n------------------------------------------\n")

    with open(args.best_file, "a") as f:
        f.write("task: {}, best_epoch: {}, \n "
                "best_micro_f1: pre: {:.4f}, rec: {:.4f}, f1:{:.4f}\n"
                "best_marco_f1: pre: {:.4f}, rec: {:.4f}, f1:{:.4f}\n".
                format(task_id, best_epoch,
                       micro_f1["precision"], micro_f1["recall"], micro_f1["f1"],
                       macro_f1["precision"], macro_f1["recall"], macro_f1["f1"]))
        f.write(str(results))

        f.write("\n------------------------------------------\n")


    return micro_f1["f1"], macro_f1["f1"]

if __name__ == '__main__':
    # NOTE(review): train_current_task takes 11 required arguments; calling
    # it with none, as here, always raises TypeError.  This entry point looks
    # like a placeholder — confirm and wire up real argument construction.
    train_current_task()  # your main function