import sys
sys.path.append("../../")
import fitlog
from transformers import AdamW
from transformers import DistilBertConfig
from transformers import DistilBertTokenizer
from transformers import BertTokenizer, BertConfig
from modeling_bert import AugBertForSequenceClassification
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, precision_recall_fscore_support
from datareader import *
from model import VanillaBert
from metrics import *
import pickle
from tqdm import trange, tqdm
from InstanceReweighting import MetaEvaluator, InstanceReweighting, update_current_devices, to_var

def pred_Logits(model:VanillaBert, data, idxs=None, batch_size=20):
    """Run the model over `data` in mini-batches and return the stacked logits.

    Args:
        model: wrapper exposing `predict(batch)` -> tensor of per-class scores.
        data: dataset exposing `__getitem__`, `__len__` and `collate_raw_batch`.
        idxs: indices of the examples to score; defaults to the whole dataset.
        batch_size: number of examples per forward pass.

    Returns:
        Tensor of shape (len(idxs), num_classes), one row per scored example.
    """
    if idxs is None:
        idxs = list(range(len(data)))
    chunks = []
    with torch.no_grad():
        for start in trange(0, len(idxs), batch_size):
            window = idxs[start:start + batch_size]
            raw_batch = data.collate_raw_batch([data[j] for j in window])
            chunks.append(model.predict(raw_batch))
    return torch.cat(chunks)

def prediction(model:VanillaBert, data, idxs=None, batch_size=20):
    """Return (predicted class index, its score) for each scored example."""
    logits = pred_Logits(model, data, idxs, batch_size)
    sorted_vals, sorted_classes = logits.sort(dim=1)
    top_class = sorted_classes[:, -1]
    top_score = sorted_vals[:, -1]
    return top_class, top_score

def acc_P_R_F1(y_true, y_pred):
    """Compute accuracy plus per-class (precision, recall, F1, support).

    `y_pred` is expected to be a torch tensor; it is moved to CPU before being
    handed to sklearn.
    """
    pred_cpu = y_pred.cpu()
    accuracy = accuracy_score(y_true, pred_cpu)
    per_class = precision_recall_fscore_support(y_true, pred_cpu, average=None)
    return accuracy, per_class

def Perf(model:VanillaBert, data, label, idxs=None, batch_size=20):
    """Score `model` on (a subset of) `data` against the gold `label`s."""
    y_pred, _ = prediction(model, data, idxs=idxs, batch_size=batch_size)
    if idxs is None:
        y_true = label
    else:
        y_true = label[idxs]
    return acc_P_R_F1(y_true, y_pred)

def WeakLabeling(model:VanillaBert, data:NLIDataset, pseaudo_idxs=None, batch_size=20):
    """Pseudo-label every example of `data` not already in `pseaudo_idxs`.

    Writes the predicted label, top confidence, and entropy back into `data`
    (via setLabel / setConfidence / setEntrophy) for the re-labelled indices.

    Args:
        model: wrapper exposing `predict(batch)` and a `device` attribute.
        data: dataset to pseudo-label in place.
        pseaudo_idxs: indices to keep untouched (already accepted pseudo-labels).
            Defaults to none. (Fixed: the original used a mutable `[]` default.)
        batch_size: forward-pass batch size.

    Returns:
        (entropy, predicted labels, top confidences), all on CPU; entropy covers
        the full dataset (zeros for skipped indices).
    """
    if pseaudo_idxs is None:
        pseaudo_idxs = []
    c_idxs = list(set(range(len(data))) - set(pseaudo_idxs))
    pred_tensor = pred_Logits(model, data, idxs=c_idxs, batch_size=batch_size)
    confs, preds = pred_tensor.sort(dim=1)
    weak_label = preds[:, -1].cpu().numpy()
    data.setLabel(weak_label, c_idxs)
    entrophy = torch.zeros([len(data)], device=model.device)
    # sum_c |log p_c| * p_c == -sum_c p_c log p_c when the scores are
    # probabilities in (0, 1] -- assumes model.predict returns probabilities,
    # not raw logits; TODO confirm.
    entrophy[c_idxs] = (confs.log().abs() * confs).sum(dim=1)
    data.setConfidence(confs[:, -1].cpu(), c_idxs)
    data.setEntrophy(entrophy.cpu(), c_idxs)
    return entrophy.cpu(), preds[:, -1].cpu(), confs[:, -1].cpu()

def expandPseaudoSet(model:VanillaBert, unlabeled:NLIDataset, skip_idxs=None, threshold=0.95, max_cnt=50):
    """Select indices whose current pseudo-label agrees with the model's fresh
    prediction and whose confidences both exceed `threshold`.

    Args:
        model: model used to re-predict the unlabeled examples.
        unlabeled: dataset carrying the stored pseudo-labels and confidences.
        skip_idxs: indices to exclude from consideration (already selected).
        threshold: minimum confidence required from BOTH sources.
        max_cnt: cap on the number of returned indices; when exceeded, keep the
            ones with the highest harmonic mean of the two confidences.

    Returns:
        List of dataset indices accepted into the pseudo-label set.
    """
    if skip_idxs is None:
        c_idxs = list(range(len(unlabeled)))
    else:
        c_idxs = list(set(range(len(unlabeled))) - set(skip_idxs))
    pred_1, conf_1 = prediction(model, unlabeled, c_idxs)
    pred_2 = unlabeled.labelTensor()[c_idxs].to(pred_1.device)
    conf_2 = unlabeled.confidence[c_idxs].to(conf_1.device)
    # Idiomatic operators replace the original `.__eq__` / `.__gt__` calls.
    pred_eq = pred_1 == pred_2
    valid_conf_1 = (conf_1 > threshold) & pred_eq
    valid_conf_2 = (conf_2 > threshold) & valid_conf_1
    expand_idxs = torch.tensor(c_idxs, device=valid_conf_2.device)[valid_conf_2]
    if len(expand_idxs) > max_cnt:
        # Harmonic mean of the two confidences; keep only the top max_cnt.
        conf_f1 = 2 * conf_2 * conf_1 / (conf_2 + conf_1)
        sort_idxs = conf_f1[valid_conf_2].argsort()[-max_cnt:]
        expand_idxs = expand_idxs[sort_idxs].tolist()
    else:
        expand_idxs = expand_idxs.tolist()
    return expand_idxs

def BERTEvaluater(data_set:NLIDataset, label):
    """Build a closure that scores a model on `data_set` against gold `label`.

    The returned callable yields (accuracy, micro-F1, per-class P/R/F1/support).
    """
    def evaluater(model:VanillaBert):
        predictions, _ = prediction(model, data_set, batch_size=20)
        cpu_preds = predictions.cpu()
        accuracy = accuracy_score(label, cpu_preds)
        micro_f1 = f1_score(label, cpu_preds, average="micro")
        per_class = precision_recall_fscore_support(label, cpu_preds, average=None)
        return accuracy, micro_f1, per_class
    return evaluater

class MetaSelfTrainer(MetaEvaluator):
    """Meta self-training driver.

    Uses the inherited MetaEvaluator machinery to learn per-instance weights for
    a weakly-labelled set against a small few-shot set, then fine-tunes the
    wrapped BERT model on the instances that received positive weight.
    """

    def __init__(self, model: VanillaBert, weak_set, few_shot_set,
                weak_set_label, exp_idxs=None, weak_set_weights=None, convey_fn=None,
                 lr4model=2e-2, scale_lr4model=1e-3, coeff4expandset=1.0, max_few_shot_size=100,
                batch_size=5):
        # `exp_idxs=None` replaces the original mutable `[]` default (same
        # semantics, but no list shared across instantiations).
        exp_idxs = [] if exp_idxs is None else exp_idxs
        super(MetaSelfTrainer, self).__init__(model, weak_set, few_shot_set,
                                               weak_set_label, exp_idxs, weak_set_weights,
                                               convey_fn, lr4model, scale_lr4model, coeff4expandset,
                                               max_few_shot_size, batch_size)
        self.expand_batch = []

    def LogSelectionInfo(self, e_arr, valid_idxs=None):
        """Print selection diagnostics: entropy and accuracy/P/R/F1 on the whole
        weak set vs. the currently selected subset (positively weighted
        instances unless `valid_idxs` is given explicitly)."""
        indices = torch.arange(len(self.weak_set))
        print(">>>>>>>MetaEvaluate Message>>>>>>>>>>>>>>>")
        pos_indices = valid_idxs if valid_idxs is not None else indices[self.weak_set_weights > 0.0]
        labels, preds = self.weak_set_label[indices], self.weak_set.labelTensor()[indices]
        print(len(indices))
        print(len(pos_indices))
        print(e_arr.mean(), e_arr[pos_indices].mean())
        print(accuracy_score(labels, preds), accuracy_score(labels[pos_indices], preds[pos_indices]))
        print(precision_score(labels, preds, average=None), precision_score(labels[pos_indices], preds[pos_indices], average=None))
        print(recall_score(labels, preds, average=None), recall_score(labels[pos_indices], preds[pos_indices], average=None))
        print(f1_score(labels, preds, average=None), f1_score(labels[pos_indices], preds[pos_indices], average=None))
        print("<<<<<<<<<<<<<<<<<MetaEvaluate Message<<<<<<<<<<<<")

    def ModelTrain(self, max_epoch, valid_indices):
        """Fine-tune the wrapped BERT on `valid_indices` with layer-wise LR decay
        and 'mix' embedding augmentation.

        NOTE(review): `lr` is a module-level global assigned in the __main__
        section -- confirm it is defined before this method runs.
        """
        labels, preds = self.weak_set_label[valid_indices], self.weak_set.labelTensor()[valid_indices]
        print("trainSet perf:", acc_P_R_F1(labels, preds))
        self.model.bert.bert.embeddings.aug_type = 'mix'
        # Layer-wise fine-tuning: layer k gets lr * 0.8^(12-k); embeddings get
        # the smallest rate (lr * 0.8^13); everything else gets the base lr.
        optimizer_grouped_parameters = [
            {'params': p,
             'lr': lr * pow(0.8, 12 - int(n.split("layer.")[1].split(".", 1)[0])) if "layer." in n \
                 else (lr * pow(0.8, 13) if "embedding" in n else lr) \
             } for n, p in self.model.named_parameters()
        ]
        model_optim = torch.optim.Adam(optimizer_grouped_parameters)
        batch_size = 32
        for epoch in range(max_epoch):
            sum_loss = 0.
            num_batches = 0
            for idx in range(0, len(valid_indices), batch_size):
                batch = self.weak_set.collate_raw_batch(
                    [self.weak_set[jj] for jj in valid_indices[idx:min(idx + batch_size, len(valid_indices))]]
                )
                cost, acc = self.model.lossAndAcc(batch)
                self.model.zero_grad()
                model_optim.zero_grad()
                cost.backward()
                self.model.bert.bert.PreserveGrad()
                model_optim.step()
                torch.cuda.empty_cache()
                print('####Model Update (%3d | %3d) ####, loss = %6.8f, acc = %6.8f' % (
                    idx, len(valid_indices), cost.data, acc
                ))
                sum_loss += cost.data
                num_batches += 1
            if num_batches == 0:  # nothing to train on
                break
            # Bug fix: average over the batches actually run. The original
            # divided by (len(valid_indices) // self.batch_size) + 1, although
            # the loop batches by the local batch_size=32, skewing the mean
            # (and thus the early stop) whenever the two disagreed.
            mean_loss = (sum_loss * 1.0) / num_batches
            print("mean loss:", mean_loss)
            if mean_loss < 0.1:  # early stop
                break
        self.model.bert.bert.embeddings.aug_type = 'none'

    def Training(self, entrophys, max_epoch=1, batch_size=32, max_meta_steps=10,
                 lr4weights=0.1, meta_lr4model=1e-1,
                 meta_scale_lr4model=5e-3):
        """One meta-reweighting pass (inherited Evaluate) followed by model
        fine-tuning on a balanced selection of positively weighted instances.

        Returns (expansion indices, indices with positive weight).
        """
        # Temporarily swap in the meta learning rates for the inner Evaluate.
        tmp = (self.lr4model, self.scale_lr4model)
        self.lr4model, self.scale_lr4model = meta_lr4model, meta_scale_lr4model
        exp_idxs = self.Evaluate(max_epochs=1, max_meta_steps=max_meta_steps, lr4weights=lr4weights)  # 0.1 on Ferguson, 0.05 on Sydney
        self.lr4model, self.scale_lr4model = tmp[0], tmp[1]
        self.LogSelectionInfo(entrophys)
        valid_indices = self.BalancedSelections()  # select a balanced set based on the weak_set_weights
        self.batch_size = batch_size
        self.ModelTrain(max_epoch, valid_indices)
        return exp_idxs, torch.arange(self.weak_set_size)[self.weak_set_weights > 0.0].tolist()

    def BalancedTraining(self, entrophys, max_epoch=1, batch_size=32, max_meta_steps=10,
        lr4weights=0.1, meta_lr4model=1e-1, meta_scale_lr4model=5e-3, pseaudo_idxs=None):
        """Meta-reweighting with hold-half-out validation (inherited HalfOut),
        then fine-tuning on the union of selected, pseudo-labelled, and
        expansion indices.

        Returns (expansion indices, validated indices); both empty-safe.
        """
        # `pseaudo_idxs=None` replaces the original mutable `[]` default.
        pseaudo_idxs = [] if pseaudo_idxs is None else pseaudo_idxs
        tmp = (self.lr4model, self.scale_lr4model)
        self.lr4model, self.scale_lr4model = meta_lr4model, meta_scale_lr4model
        exp_idxs, valid_idxs = self.HalfOut(max_epochs=1, max_meta_steps=max_meta_steps,
                                            lr4weights=lr4weights, pseaudo_idxs=pseaudo_idxs)  # 0.1 on Ferguson, 0.05 on Sydney
        self.lr4model, self.scale_lr4model = tmp[0], tmp[1]
        if len(valid_idxs) == 0:
            print("===> no valid idxs")
            return exp_idxs, valid_idxs
        self.LogSelectionInfo(entrophys, valid_idxs=valid_idxs)
        self.batch_size = batch_size
        train_idxs = valid_idxs + pseaudo_idxs + exp_idxs
        self.ModelTrain(max_epoch, train_idxs)
        return exp_idxs, valid_idxs


def test_model(model, test_set, test_label, test_suffix, step=0):
    """Evaluate `model` on a test split and log the class-1 metrics to fitlog."""
    result = Perf(model, test_set, test_label)
    accuracy, (prec, rec, f1, _) = result
    print(f"Original Performance of {test_suffix}:", result)
    payload = {"valid_acc": accuracy, "valid_prec": prec[1],
               "valid_recall": rec[1], "valid_f1": f1[1]}
    fitlog.add_best_metric({f"Original_{test_suffix}": payload})
    fitlog.add_metric({f"{test_suffix}": payload}, step=step)

def train_loop(model, optim, dataset, valid_indices, max_epoch, batch_size):
    """Supervised fine-tuning loop over `dataset[valid_indices]`.

    Bug fix: the original `for idx, batch in range(...)` tried to unpack the
    integers yielded by range() and raised TypeError on the first iteration;
    `batch` is actually built inside the loop. Also guards against a
    ZeroDivisionError when `valid_indices` is empty.

    Args:
        model: exposes `lossAndAcc(batch)` -> (loss tensor, accuracy) and
            `zero_grad()`.
        optim: torch optimizer over the model's parameters.
        dataset: exposes `__getitem__` and `collate_raw_batch`.
        valid_indices: dataset indices to train on.
        max_epoch: maximum number of passes over `valid_indices`.
        batch_size: mini-batch size.
    """
    for epoch in range(max_epoch):
        sum_loss = 0.
        counter = 0
        for idx in range(0, len(valid_indices), batch_size):
            batch = dataset.collate_raw_batch([dataset[jj]
                for jj in valid_indices[idx:min(idx + batch_size, len(valid_indices))]])
            cost, acc = model.lossAndAcc(batch)
            model.zero_grad()
            optim.zero_grad()
            cost.backward()
            optim.step()
            torch.cuda.empty_cache()
            print('####Model Update (%3d | %3d) ####, loss = %6.8f, acc = %6.8f' % (
                idx, len(valid_indices), cost.data, acc
            ))
            sum_loss += cost.data
            counter += 1
        if counter == 0:  # no training data -- avoid dividing by zero
            break
        mean_loss = (sum_loss * 1.0) / counter
        print("mean loss:", mean_loss)
        if mean_loss < 0.2:  # early stop
            break

def _seed_training_rngs(seed):
    """Reset the RNGs this training run depends on and force deterministic cuDNN.

    (The original called random.seed twice per model -- possibly a typo for
    np.random.seed; behavior is preserved by seeding once.)
    """
    random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True


def _layerwise_param_groups(model):
    """Build one optimizer group per parameter with layer-wise decayed LR:
    layer k gets lr * 0.8^(12-k), embeddings lr * 0.8^13, the rest the base lr.

    NOTE(review): `lr` is a module-level global assigned in the __main__
    section -- confirm it is set before this is called.
    """
    return [
        {'params': p,
         'lr': lr * pow(0.8, 12 - int(n.split("layer.")[1].split(".", 1)[0])) if "layer." in n
             else (lr * pow(0.8, 13) if "embedding" in n else lr)
         } for n, p in model.named_parameters()
    ]


def ModelTrain(unlabeled_set, model1, model2, max_epoch, valid_indices, seed1=1, seed2=2):
    """Train the two peer models on the same pseudo-labelled indices under
    different seeds (the two copy-pasted setup blocks of the original are
    factored into the helpers above)."""
    _seed_training_rngs(seed1)
    optim1 = torch.optim.Adam(_layerwise_param_groups(model1))
    print("========train model 1=====>")
    train_loop(model1, optim1,
               unlabeled_set, valid_indices,
               max_epoch, 32)

    _seed_training_rngs(seed2)
    optim2 = torch.optim.Adam(_layerwise_param_groups(model2))
    print("========train model 2=====>")
    train_loop(model2, optim2,
               unlabeled_set, valid_indices,
               max_epoch, 32)

def MetaSelfTrain(bertmodel, tr_model_path, anno_model_path, f_set, weak_set, weak_set_label, p_idxs, e_idxs):
    """Run one self-training round with an annotator/trainee checkpoint pair.

    The annotator checkpoint (`anno_model_path`) pseudo-labels `weak_set`
    (skipping indices in `p_idxs + e_idxs`); the trainee checkpoint
    (`tr_model_path`) is then loaded, meta-trained on the selected instances via
    MetaSelfTrainer, and saved back to `tr_model_path`.

    Args:
        bertmodel: shared model wrapper whose weights are swapped in via
            load_state_dict for each role.
        tr_model_path / anno_model_path: trainee / annotator checkpoint files.
        f_set: few-shot (clean) dataset used for meta-evaluation.
        weak_set: target-domain dataset; mutated in place by WeakLabeling
            (labels, confidence, entropy).
        weak_set_label: gold labels of weak_set (used only for diagnostics).
        p_idxs, e_idxs: previously accepted pseudo-label / expansion indices.

    Returns:
        (exp_idxs, valid_idxs, p_idxs): this round's selection result.
    """
    # Pseudo-label with the annotator model's weights.
    state_dicts = torch.load(anno_model_path)
    bertmodel.load_state_dict(state_dicts)
    entrophy, preds, logits = WeakLabeling(bertmodel, weak_set, pseaudo_idxs=p_idxs + e_idxs)
    # idxs = expandPseaudoSet(bertmodel, anno_model, weak_set, p_idxs + e_idxs, threshold=0.95)
    # p_idxs.extend(idxs)
    p_idxs = []  # NOTE(review): incoming p_idxs are discarded here -- confirm this is intended
    # Switch to the trainee checkpoint before meta-training.
    state_dicts = torch.load(tr_model_path)
    bertmodel.load_state_dict(state_dicts)
    trainer = MetaSelfTrainer(bertmodel, weak_set, f_set,
                                weak_set_label, exp_idxs=e_idxs, convey_fn=None, lr4model=5e-2,
                                   scale_lr4model=4e-2, max_few_shot_size=100, batch_size=20)
    max_meta_steps = 10
    # if len(e_idxs)>100:
    #     print("expand_idxs length:", len(e_idxs))
    #     trainer.ConstructExpandData(batch_size=100)
    #     max_meta_steps = 5
    exp_idxs, valid_idxs = trainer.BalancedTraining(entrophy, max_epoch=10, batch_size=32,
                                                            max_meta_steps=max_meta_steps,
                                                            lr4weights=0.1, meta_lr4model=1e-1,
                                                            meta_scale_lr4model=5e-6,
                                                            pseaudo_idxs=p_idxs)
    if len(valid_idxs) == 0:
        return exp_idxs, valid_idxs, p_idxs

    rst_model1 = Perf(bertmodel, weak_set, weak_set_label)
    # NOTE(review): the "%3d | %3d" placeholders below are never filled in --
    # the format string is printed literally.
    print("%3d | %3d Post-MetaTrain Performance of model1:", rst_model1)
    pseaudo_labels = weak_set.labelTensor()
    rst_s = acc_P_R_F1(weak_set_label[valid_idxs+exp_idxs],
                    pseaudo_labels[valid_idxs+exp_idxs])
    print("###Accuracy On selected instancees", rst_s)
    # NOTE(review): p_idxs is [] at this point, so these metrics run on empty
    # index lists -- sklearn may warn or raise on empty input; confirm intended.
    rst_p = acc_P_R_F1(weak_set_label[p_idxs],
                       pseaudo_labels[p_idxs])
    print("###Accuracy On pseaudo instances", rst_p)
    rst_t = acc_P_R_F1(weak_set_label[p_idxs + valid_idxs+exp_idxs],
                       pseaudo_labels[p_idxs + valid_idxs+exp_idxs])
    print("###Accuracy On training instances", rst_t)
    # Persist the meta-trained weights back to the trainee checkpoint.
    torch.save(bertmodel.state_dict(), tr_model_path)
    return exp_idxs, valid_idxs, p_idxs


if __name__ == "__main__":
    import shutil  # local import: only this entry point needs it (log-dir reset)

    with open("../../args.pkl", 'rb') as fr:
        args = pickle.load(fr)
    # Bug fix: rstrip(".py") strips any trailing '.', 'p', 'y' characters
    # (e.g. "copy.py" -> "co"); slice the extension off instead.
    args.model_dir = str(__file__)[:-3] if str(__file__).endswith(".py") else str(__file__)
    # Set all the seeds
    seed = args.seed
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    # See if CUDA available
    device = torch.device("cpu")
    if args.n_gpu > 0 and torch.cuda.is_available():
        print("Training on GPU")
        device = torch.device("cuda:0")

    # Model configuration (the original assigned this block twice; deduplicated).
    batch_size = args.batch_size
    lr = args.lr
    weight_decay = args.weight_decay
    n_epochs = args.n_epochs
    args.full_bert = True
    args.bertPath = "../../../bert_en/"
    print("====>", args.full_bert)
    bert_model = 'bert-base-uncased' if args.full_bert else 'distilbert-base-uncased'
    if args.full_bert:
        bert_config = BertConfig.from_pretrained(bert_model, num_labels=2) if args.bertPath is None else \
                        BertConfig.from_pretrained(args.bertPath, num_labels=2)
        tokenizer = BertTokenizer.from_pretrained(bert_model) if args.bertPath is None else \
                        BertTokenizer.from_pretrained(args.bertPath)
    else:
        bert_config = DistilBertConfig.from_pretrained(bert_model, num_labels=2) if args.distillBertPath is None else \
                        DistilBertConfig.from_pretrained(args.distillBertPath, num_labels=2)
        tokenizer = DistilBertTokenizer.from_pretrained(bert_model) if args.distillBertPath is None else \
                        DistilBertTokenizer.from_pretrained(args.distillBertPath)

    # Recreate the log directory from scratch. shutil/os calls replace the
    # original `os.system("rm -rf ..." / "mkdir ...")` shell-outs: no shell
    # injection via the directory name, and portable across platforms.
    log_dir = args.model_dir
    if os.path.exists(log_dir):
        shutil.rmtree(log_dir)
    os.makedirs(log_dir)
    fitlog.set_log_dir(log_dir)
    fitlog.add_hyper({
            "epochs": n_epochs,
            "learning_rate": lr,
            "warmup": args.warmup_steps,
            "weight_decay": weight_decay,
            "batch_size": batch_size,
            "train_split_percentage": args.train_pct,
            "bert_model": bert_model,
            "seed": seed,
            "pretrained_model": args.pretrained_model,
            "tags": ",".join(args.tags)
        }, name=args.run_name)

    # The configs above were loaded with num_labels=2; NLI has 3 classes.
    bert_config.num_labels = 3
    bert_config.hidden_act = "relu"

    # Create the model
    bert = AugBertForSequenceClassification.from_pretrained(
                    bert_model, config=bert_config).to(device) if args.bertPath is None \
            else AugBertForSequenceClassification.from_pretrained(
                    args.bertPath, config=bert_config).to(device)
    bert.bert.embeddings.aug_type = 'none'
    model = VanillaBert(bert).to(device)

    model1_path = "../../saved/modelSNLI_1.pth"
    model2_path = "../../saved/modelSNLI_2.pth"

    domain_id = 2
    NLI_domain_list = list(NLI_domain_map.keys())
    SNLI_set = NLIDataset("../../../snli_1.0/snli_1.0_train.jsonl", tokenizer=tokenizer)
    new_domain_name = NLI_domain_list[domain_id-1]
    test_set = NLIDataset(f"../../../multinli_1.0/Domain_{new_domain_name}.jsonl", tokenizer=tokenizer)
    new_domain_label = test_set.labelTensor()
    few_shot_set = NLIDataset(f"../../../multinli_1.0/fewshot_Domain_{new_domain_name}.jsonl",
                              tokenizer=tokenizer, max_data_size=20)
    train_iter = 0
    pseaudo_idxs = []
    # Alternate the annotator/trainee roles between the two checkpoints until
    # fewer than 5% of the target-domain examples remain unlabelled.
    for _ in range(60):
        e_idxs, v_idxs, pseaudo_idxs = MetaSelfTrain(model, model1_path, model2_path, few_shot_set, test_set,
                                                              new_domain_label, pseaudo_idxs, [])
        rest_cnt = len(test_set) - len(set(pseaudo_idxs))
        if (rest_cnt*1.0/len(test_set)) < 0.05:
            break

        e_idxs, v_idxs, pseaudo_idxs = MetaSelfTrain(model, model2_path, model1_path, few_shot_set, test_set,
                                                        new_domain_label, pseaudo_idxs, [])
        rest_cnt = len(test_set) - len(set(pseaudo_idxs))
        if (rest_cnt * 1.0 / len(test_set)) < 0.05:
            break
        else:
            train_iter += 1