import sys
sys.path.append("../../")
import fitlog
from transformers import AdamW
from transformers import DistilBertConfig
from transformers import DistilBertTokenizer
from transformers import BertTokenizer
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, precision_recall_fscore_support
from datareader import *
from metrics import *
from model import *
import pickle
from tqdm import trange, tqdm
from InstanceReweighting import MetaEvaluator

def pred_Logits(model:VanillaBert, data, idxs=None, batch_size=20):
    """Run ``model.predict`` over ``data`` in mini-batches and return the
    concatenated prediction tensor.

    idxs: subset of example indices to score; defaults to the whole dataset.
    """
    if idxs is None:
        idxs = list(range(len(data)))
    chunks = []
    with torch.no_grad():  # inference only: no autograd bookkeeping
        for start in trange(0, len(idxs), batch_size):
            end = min(len(idxs), start + batch_size)
            batch = data.collate_raw_batch([data[j] for j in idxs[start:end]])
            chunks.append(model.predict(batch))
    return torch.cat(chunks)

def prediction(model:VanillaBert, data, idxs=None, batch_size=20):
    """Return (predicted class per example, score of that class)."""
    scores = pred_Logits(model, data, idxs, batch_size)
    # Highest-scoring class index per row, then gather its score.
    order = scores.argsort(dim=1)
    top_cls = order[:, -1]
    top_score = scores.gather(1, top_cls.unsqueeze(1)).squeeze(1)
    return top_cls, top_score

def expandPseaudoSet(model1, model2, unlabeled, skip_idxs=None, threshold=0.95, max_cnt=50):
    """Select up to ``max_cnt`` unlabeled examples on which both models agree
    with confidence above ``threshold``; returns their dataset indices.

    skip_idxs: indices already pseudo-labeled, excluded from the candidates.
    """
    if skip_idxs is None:
        candidates = list(range(len(unlabeled)))
    else:
        candidates = list(set(range(len(unlabeled))) - set(skip_idxs))
    pred_a, conf_a = prediction(model1, unlabeled, candidates)
    pred_b, conf_b = prediction(model2, unlabeled, candidates)
    # Keep candidates where both models predict the same class and each is
    # individually confident above the threshold.
    agree = pred_a.eq(pred_b)
    confident = (conf_a > threshold) & agree & (conf_b > threshold)
    expand_idxs = torch.tensor(candidates, device=confident.device)[confident]
    if len(expand_idxs) > max_cnt:
        # Rank the agreements by the harmonic mean of the two confidences
        # (F1-style combination) and keep the strongest `max_cnt`.
        harmonic = 2 * conf_b * conf_a / (conf_b + conf_a)
        keep = harmonic[confident].argsort()[-max_cnt:]
        return expand_idxs[keep].tolist()
    return expand_idxs.tolist()

def acc_P_R_F1(y_true, y_pred):
    """Return (accuracy, (precision, recall, F1, support)) for torch predictions."""
    y_pred_cpu = y_pred.cpu()
    acc = accuracy_score(y_true, y_pred_cpu)
    prfs = precision_recall_fscore_support(y_true, y_pred_cpu)
    return acc, prfs

def Perf(model:VanillaBert, data, label, idxs=None, batch_size=20):
    """Evaluate ``model`` on ``data`` against gold ``label``; returns
    (accuracy, (precision, recall, F1, support))."""
    y_pred, _ = prediction(model, data, idxs=idxs, batch_size=batch_size)
    gold = label if idxs is None else label[idxs]
    return acc_P_R_F1(gold, y_pred)

def WeakLabeling(model:VanillaBert, data:NLIDataset, pseaudo_idxs=[], batch_size=20):
    """Pseudo-label every example of ``data`` outside ``pseaudo_idxs`` with the
    model's argmax class (written back into the dataset via ``setLabel``) and
    return (per-example entropy, predicted class, top confidence).

    The entropy tensor covers the whole dataset (zeros for skipped indices);
    the class/confidence tensors cover only the relabeled indices.
    """
    target_idxs = list(set(range(len(data))) - set(pseaudo_idxs))
    scores = pred_Logits(model, data, idxs=target_idxs, batch_size=batch_size)
    sorted_conf, sorted_cls = scores.sort(dim=1)
    # Attach the argmax class as the weak label of each relabeled example.
    data.setLabel(sorted_cls[:, -1].cpu().numpy(), target_idxs)
    # -sum(p * log p) per example (assumes `model.predict` returns
    # probabilities in (0, 1] -- TODO confirm); zero for skipped indices.
    entrophy = torch.zeros([len(data)], device=model.device)
    entrophy[target_idxs] = (sorted_conf.log().abs() * sorted_conf).sum(dim=1)
    return entrophy.cpu(), sorted_cls[:, -1].cpu(), sorted_conf[:, -1].cpu()

def obtain_model(tfidf_vec):
    """Placeholder, not implemented. Presumably meant to build a classifier
    from a TF-IDF vectorizer, judging by the parameter name -- confirm."""
    pass

def obtain_Domain_set(fs_prefix, od_prefix, nd_prefix):
    """Placeholder, not implemented. Parameter names suggest few-shot,
    old-domain and new-domain dataset path prefixes -- confirm."""
    pass

def Convert_2_BiGCNFormat(data):
    """Placeholder, not implemented. Called in MetaSelfTrain on the few-shot
    set; currently returns None, which the callee must tolerate."""
    pass

class MetaSelfTrainer(MetaEvaluator):
    """Self-training driver built on ``MetaEvaluator``: meta-learns per-example
    weights over a weakly-labeled set, selects a class-balanced subset of the
    positively-weighted examples, and fine-tunes the model on it."""

    def __init__(self, model:VanillaBert, weak_set, few_shot_set,
                weak_set_label, exp_idxs=[], weak_set_weights=None, convey_fn=None,
                 lr4model=2e-2, scale_lr4model=1e-3, coeff4expandset=1.0, max_few_shot_size=20,
                batch_size=5):
        # NOTE(review): `exp_idxs=[]` is a mutable default argument; safe only
        # if MetaEvaluator never mutates it in place -- confirm.
        super(MetaSelfTrainer, self).__init__(model, weak_set, few_shot_set,
                                               weak_set_label, exp_idxs, weak_set_weights,
                                               convey_fn, lr4model, scale_lr4model, coeff4expandset,
                                               max_few_shot_size, batch_size)
        # Batches of expanded (pseudo-labeled) data; not filled in this class.
        self.expand_batch = []

    def LogSelectionInfo(self, e_arr, valid_idxs=None):
        """Print diagnostics comparing the whole weak set with the selected
        subset: sizes, mean entropies, and acc/P/R/F1 of the weak labels
        against the gold labels.

        e_arr: per-example entropy tensor, length == len(self.weak_set).
        valid_idxs: explicit selection; defaults to positively-weighted examples.
        """
        indices = torch.arange(len(self.weak_set))
        print(">>>>>>>MetaEvaluate Message>>>>>>>>>>>>>>>")
        # Fall back to examples whose meta-learned weight is positive.
        pos_indices = valid_idxs if valid_idxs is not None else indices[self.weak_set_weights.__gt__(0.0)]
        # `weak_set_label` holds gold labels; `data_y` holds the one-hot weak
        # labels currently attached to the dataset.
        labels, preds = self.weak_set_label[indices], torch.tensor(self.weak_set.data_y)[
            indices].argmax(dim=1)
        print(len(indices))
        print(len(pos_indices))
        print(e_arr.mean(), e_arr[pos_indices].mean())
        print(accuracy_score(labels, preds), accuracy_score(labels[pos_indices], preds[pos_indices]))
        print(precision_score(labels, preds), precision_score(labels[pos_indices], preds[pos_indices]))
        print(recall_score(labels, preds), recall_score(labels[pos_indices], preds[pos_indices]))
        print(f1_score(labels, preds), f1_score(labels[pos_indices], preds[pos_indices]))
        print("<<<<<<<<<<<<<<<<<MetaEvaluate Message<<<<<<<<<<<<")

    def balancedTrainingIter(self, valid_idxs=None):
        """Yield collated training batches over ``valid_idxs`` in which the two
        classes are balanced: the minority class is oversampled (repetition
        plus random sampling) to match the majority-class count, then each
        batch takes batch_size//2 examples from each class.

        NOTE(review): raises ZeroDivisionError if either class is absent from
        valid_idxs -- callers appear to rely on BalancedSelections to avoid that.
        """
        labels = torch.tensor(self.weak_set.data_y).argmax(dim=1)[valid_idxs]
        valid_idxs = valid_idxs if isinstance(valid_idxs, torch.Tensor) else torch.tensor(valid_idxs)
        pos_idxs = valid_idxs[labels.__eq__(1)].tolist()
        neg_idxs = valid_idxs[labels.__eq__(0)].tolist()
        if len(pos_idxs) > len(neg_idxs):
            max_size = len(pos_idxs)
            # Repeat the negative list, then top it up with a random sample so
            # both lists have exactly the same length.
            neg_idxs = neg_idxs*(len(pos_idxs)//len(neg_idxs)) + \
                            random.sample(neg_idxs, len(pos_idxs)%len(neg_idxs))
        else:
            max_size = len(neg_idxs)
            pos_idxs = pos_idxs * (len(neg_idxs) // len(pos_idxs)) + \
                            random.sample(pos_idxs, len(neg_idxs) % len(pos_idxs))
        for i in range(0, max_size, self.batch_size//2):
            # Half the batch from each class, walked in lockstep.
            training_idxs = pos_idxs[i:min(max_size, i+self.batch_size//2)] + \
                                neg_idxs[i:min(max_size, i + self.batch_size // 2)]
            yield self.weak_set.collate_raw_batch([self.weak_set[j] for j in training_idxs])

    def BalancedSelections(self):
        """Select training indices from the positively-weighted examples,
        correcting extreme class imbalance (weak-label positive ratio >= 0.9
        or <= 0.1) by either falling back to the top-weighted half of the whole
        set or appending the top-weighted third of the missing class."""
        indices = torch.arange(self.weak_set_size)
        weak_labels = torch.tensor(self.weak_set.data_y)[indices].argmax(dim=1)

        # Positive-class ratio over the whole weak set.
        init_ratio = weak_labels.sum() * 1.0 / len(weak_labels)
        print(">>>>>>>>>>>>>>>init ratio:", init_ratio)
        valid_indices = indices[self.weak_set_weights.__gt__(0.0)]
        # *************************balance the training data***************************#
        valid_labels = weak_labels[valid_indices]
        ratio = valid_labels.sum() * 1.0 / len(valid_indices)
        print(">>>>>>>>>>>>>>>ratio:", ratio)
        if ratio >= 0.9:
            # Selection is almost all positive.
            if init_ratio < 0.5:
                # The full set leans negative: restart from the top-weighted half.
                valid_indices = self.weak_set_weights.argsort()[-len(indices)//2:]
            else:
                # Append the top-weighted third of the negatives.
                negs = indices[weak_labels.__eq__(0)]
                idxs = self.weak_set_weights[negs].argsort()[-len(negs) // 3:]
                neg_exp = negs[idxs]
                valid_indices = torch.cat([valid_indices, neg_exp])
            valid_labels = weak_labels[valid_indices]
            ratio = valid_labels.sum() * 1.0 / len(valid_indices)
        elif ratio <= 0.1:
            # Mirror case: selection is almost all negative.
            if init_ratio > 0.5:
                valid_indices = self.weak_set_weights.argsort()[-len(indices) // 2:]
            else:
                pos = indices[weak_labels.__eq__(1)]
                idxs = self.weak_set_weights[pos].argsort()[-len(pos) // 3:]
                pos_exp = pos[idxs]
                valid_indices = torch.cat([valid_indices, pos_exp])
            valid_labels = weak_labels[valid_indices]
            ratio = valid_labels.sum() * 1.0 / len(valid_indices)
        print(">>>>>>>>> modified ratio:", ratio)
        return valid_indices

    def ModelTrain(self, max_epoch, valid_indices):
        """Fine-tune the model on class-balanced batches drawn from
        ``valid_indices`` for up to ``max_epoch`` epochs, stopping early once
        the mean epoch loss drops below 0.2."""
        # print("trainSet perf:", acc_P_R_F1(new_domain_label[valid_indices],
        #                                         weak_label[valid_indices]))
        for epoch in range(max_epoch):
            start = 0
            sum_loss = 0.
            for batch in self.balancedTrainingIter(valid_indices):
                loss = self.ModelLoss(batch, self.model)
                cost = torch.mean(loss)
                # Clear both the module's and the optimizer's gradients before
                # the backward pass.
                self.model.zero_grad()
                self.model_optim.zero_grad()
                cost.backward()
                self.model_optim.step()
                torch.cuda.empty_cache()
                print('####Model Update (%3d | %3d) ####, loss = %6.8f' % (
                    start, len(valid_indices), loss.data.mean()
                ))
                sum_loss += cost.data
                start += self.batch_size
            mean_loss = (sum_loss * 1.0) / ((len(valid_indices) // self.batch_size) + 1)
            print("mean loss:", mean_loss)
            if mean_loss < 0.2:  # early stop
                break

    def Training(self, entrophys, max_epoch=1, batch_size=32, max_meta_steps=10,
                 lr4weights=0.1, meta_lr4model=1e-1,
                 meta_scale_lr4model=5e-3):
        """Run a meta-evaluation pass (with temporarily swapped learning rates)
        to learn example weights, then fine-tune on a balanced selection.

        Returns (expanded indices from Evaluate, positively-weighted indices).
        """
        # Temporarily swap in the meta learning rates for the inner loop.
        tmp = (self.lr4model, self.scale_lr4model)
        self.lr4model, self.scale_lr4model = meta_lr4model, meta_scale_lr4model
        exp_idxs = self.Evaluate(max_epochs=1, max_meta_steps=max_meta_steps, lr4weights=lr4weights)  # 0.1 on ferguson, 0.05 on sydney
        self.lr4model, self.scale_lr4model = tmp[0], tmp[1]
        self.LogSelectionInfo(entrophys)
        valid_indices = self.BalancedSelections() # select a balanced set based on the weak_set_weights
        self.batch_size = batch_size
        self.ModelTrain(max_epoch, valid_indices)
        return exp_idxs, torch.arange(self.weak_set_size)[self.weak_set_weights.__gt__(0.0)].tolist()

    def BalancedTraining(self, entrophys, max_epoch=1, batch_size=32, max_meta_steps=10,
        lr4weights=0.1, meta_lr4model=1e-1, meta_scale_lr4model=5e-3, pseaudo_idxs=[]):
        """Variant of Training that gets the selection directly from
        ``ValidIndicesOut`` (excluding ``pseaudo_idxs``) and trains on the
        union of the selected and pseudo-labeled indices.

        Returns (expanded indices, selected valid indices).
        """
        # Temporarily swap in the meta learning rates for the inner loop.
        tmp = (self.lr4model, self.scale_lr4model)
        self.lr4model, self.scale_lr4model = meta_lr4model, meta_scale_lr4model
        exp_idxs, valid_idxs = self.ValidIndicesOut(max_epochs=1, max_meta_steps=max_meta_steps,
                                            lr4weights=lr4weights, pseaudo_idxs=pseaudo_idxs) # 0.1 on ferguson, 0.05 on sydney
        self.lr4model, self.scale_lr4model = tmp[0], tmp[1]
        self.LogSelectionInfo(entrophys, valid_idxs=valid_idxs)
        self.batch_size = batch_size
        train_idxs = valid_idxs + pseaudo_idxs
        self.ModelTrain(max_epoch, train_idxs)
        return exp_idxs, valid_idxs


def test_model(model, test_loader, test_suffix):
    """Evaluate ``model`` on ``test_loader`` and record the five validation
    metrics in fitlog (both as best metric and as step-0 metric)."""
    rst_model = model.valid(test_loader, all_metrics=True)
    print(f"Original Performance of {test_suffix}:", rst_model)
    names = ["valid_acc", "valid_loss", "valid_prec", "valid_recall", "valid_f1"]
    metrics = {name: rst_model[i] for i, name in enumerate(names)}
    fitlog.add_best_metric({f"Original_{test_suffix}": dict(metrics)})
    fitlog.add_metric({f"{test_suffix}": dict(metrics)}, step=0)


def SelfTrain():
    """Placeholder for a plain (non-meta) self-training loop; not implemented."""
    pass

def MetaSelfTrain(tr_model, anno_model, f_set, weak_set, weak_set_label, p_idxs, e_idxs, tr_model_suffix, train_iter):
    """Run one iteration of meta self-training.

    Steps: (1) weak-label the candidates with ``anno_model``, (2) expand the
    pseudo set with high-confidence agreements of both models, (3) meta-learn
    example weights and fine-tune ``tr_model`` on a balanced selection, and
    (4) log evaluation and selection-quality metrics to fitlog.

    Mutates ``p_idxs`` in place (extends it with the expanded indices).
    Returns (exp_idxs, valid_idxs, p_idxs).
    """
    # BUGFIX: WeakLabeling's keyword parameter is `pseaudo_idxs`, not `p_idxs`
    # (the original call raised TypeError).
    entrophy, preds, logits = WeakLabeling(anno_model, weak_set, pseaudo_idxs=p_idxs + e_idxs)
    idxs = expandPseaudoSet(tr_model, anno_model, weak_set, p_idxs + e_idxs, threshold=0.95)
    p_idxs.extend(idxs)
    IR_weighting = MetaSelfTrainer(tr_model, weak_set, Convert_2_BiGCNFormat(f_set),
                                   weak_set_label, exp_idxs=e_idxs, convey_fn=None, lr4model=5e-2,
                                   scale_lr4model=4e-2, batch_size=20)
    # IR_weighting.ConstructExpandData(batch_size=32)
    # BUGFIX: BalancedTraining's keyword parameter is `pseaudo_idxs`, not `p_idxs`.
    exp_idxs, valid_idxs = IR_weighting.BalancedTraining(entrophy, max_epoch=10, batch_size=32, max_meta_steps=10,
                                                            lr4weights=0.1, meta_lr4model=1e-1,
                                                            meta_scale_lr4model=5e-4,
                                                            pseaudo_idxs=p_idxs)

    rst_model1 = Perf(tr_model, weak_set, weak_set_label)
    # BUGFIX: the original print used "%3d | %3d" placeholders without any
    # arguments; print the result tuple directly.
    print("Post-MetaTrain Performance of model1:", rst_model1)
    # Perf returns (acc, (precision, recall, F1, support)) -- a 2-tuple, so the
    # original `rst_model1[1..4]` indexing raised IndexError.  Unpack instead.
    acc_m, (p_m, r_m, f1_m, _) = rst_model1
    pseaudo_labels = torch.tensor(weak_set.data_y).argmax(dim=1)
    acc_s, (p_s, r_s, f1_s, _) = acc_P_R_F1(weak_set_label[valid_idxs],
                                            pseaudo_labels[valid_idxs])
    acc_p, (p_p, r_p, f1_p, _) = acc_P_R_F1(weak_set_label[p_idxs],
                                            pseaudo_labels[p_idxs])
    acc_t, (p_t, r_t, f1_t, _) = acc_P_R_F1(weak_set_label[p_idxs + valid_idxs],
                                            pseaudo_labels[p_idxs + valid_idxs])
    # Index [1] picks the positive class, matching the other metric entries.
    # No "valid_loss" is logged: Perf does not compute a loss.
    fitlog.add_metric({f"{tr_model_suffix}":
                           {"valid_acc": acc_m,
                            "valid_prec": p_m[1],
                            "valid_recall": r_m[1],
                            "valid_f1": f1_m[1],
                            "selected_num": len(valid_idxs) * 1.0 / len(weak_set),
                            "selected_acc": acc_s, "selected_prec": p_s[1],
                            "selected_recall": r_s[1], "selected_f1": f1_s[1],
                            "pseaudo_acc": acc_p, "pseaudo_prec": p_p[1],
                            "pseaudo_recall": r_p[1], "pseaudo_f1": f1_p[1],
                            "train_acc": acc_t, "train_prec": p_t[1],
                            "train_recall": r_t[1], "train_f1": f1_t[1],
                            "init_entrophy": entrophy.mean(),
                            "selected_entrophy": entrophy[valid_idxs].mean()
                            }}, step=train_iter)

    return exp_idxs, valid_idxs, p_idxs

def MetaSelfTrain_ET():
    """Placeholder variant of MetaSelfTrain; not implemented."""
    pass

def MetaSelfTrain_EF():
    """Placeholder variant of MetaSelfTrain; not implemented."""
    pass

def readLog(filepath):
    """Parse a training log and collect the initial and optimized accuracies.

    Expects marker lines like ``=====> init acc: (..., <acc>, ...)`` and
    ``=====> Optimized acc: (..., <acc>, ...)``; the accuracy is taken as the
    second-to-last comma-separated field (assumes the printed tuple has at
    least three fields -- TODO confirm log format).

    Returns (init_accs, optim_accs); asserts both lists have equal length.
    """
    init_haccs = []
    optim_haccs = []
    with open(filepath, "r") as fr:
        for line in fr:
            # BUGFIX: strip the trailing newline first -- the original
            # `line.strip(")")` was a no-op because lines end with ")\n",
            # so the ")" was never at the string edge.
            cleaned = line.strip().strip(")")
            if "=====> init acc:" in cleaned:
                init_haccs.append(float(cleaned.split(",")[-2]))
            if "=====> Optimized acc:" in cleaned:
                optim_haccs.append(float(cleaned.split(",")[-2]))
    assert len(init_haccs) == len(optim_haccs)
    return init_haccs, optim_haccs


if __name__ == "__main__":
    import shutil  # used below to reset the log directory

    # Arguments are loaded from a pickled argparse Namespace produced by an
    # earlier run (the original in-file parser definition was dead code and
    # has been removed).  Expected fields: seed, n_gpu, batch_size, lr,
    # weight_decay, n_epochs, warmup_steps, train_pct, pretrained_model,
    # tags, run_name, full_bert, bertPath, distillBertPath -- TODO confirm.
    with open("../../args.pkl", 'rb') as fr:
        args = pickle.load(fr)
    # BUGFIX: `str(__file__).rstrip(".py")` strips any trailing '.', 'p' or
    # 'y' characters (e.g. "happy.py" -> "ha"); splitext drops the extension
    # safely.
    args.model_dir = os.path.splitext(str(__file__))[0]

    # Seed every RNG for reproducibility.
    seed = args.seed
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

    # See if CUDA available
    device = torch.device("cpu")
    if args.n_gpu > 0 and torch.cuda.is_available():
        print("Training on GPU")
        device = torch.device("cuda:0")

    # Model configuration.  (The original file assigned this whole block
    # twice back-to-back; once is enough.)
    batch_size = args.batch_size
    lr = args.lr
    weight_decay = args.weight_decay
    n_epochs = args.n_epochs
    args.full_bert = True
    args.bertPath = "../../../bert_en/"
    print("====>", args.full_bert)

    bert_model = 'bert-base-uncased' if args.full_bert else 'distilbert-base-uncased'
    if args.full_bert:
        # Prefer a local checkpoint path when one is configured.
        bert_config = BertConfig.from_pretrained(bert_model, num_labels=2) if args.bertPath is None else \
                        BertConfig.from_pretrained(args.bertPath, num_labels=2)
        tokenizer = BertTokenizer.from_pretrained(bert_model) if args.bertPath is None else \
                        BertTokenizer.from_pretrained(args.bertPath)
    else:
        bert_config = DistilBertConfig.from_pretrained(bert_model, num_labels=2) if args.distillBertPath is None else \
                        DistilBertConfig.from_pretrained(args.distillBertPath, num_labels=2)
        tokenizer = DistilBertTokenizer.from_pretrained(bert_model) if args.distillBertPath is None else \
                        DistilBertTokenizer.from_pretrained(args.distillBertPath)

    train_set = NLIDataset("../../../snli_1.0/snli_1.0_train.jsonl", tokenizer=tokenizer)
    val_set = NLIDataset("../../../snli_1.0/snli_1.0_dev.jsonl", tokenizer=tokenizer)
    train_size = len(train_set)
    val_size = len(val_set)

    accs, Ps, Rs, F1s = [], [], [], []
    # Store labels and logits for individual splits for micro F1
    labels_all, logits_all = [], []

    # BUGFIX: use the configured `device` instead of a hard-coded "cuda:0",
    # so the script also runs on CPU-only machines.
    train_loader = DataLoader(
        train_set,
        batch_size=32,
        shuffle=True,
        collate_fn=collate_SNLI_batch_with_device(device)
    )
    validation_evaluator = NLIEvaluator(val_set, device)

    # Recreate the fitlog directory from scratch (replaces the original
    # os.system("mkdir"/"rm -rf") shell calls with stdlib equivalents).
    log_dir = args.model_dir
    if os.path.exists(log_dir):
        shutil.rmtree(log_dir)
    os.makedirs(log_dir)
    fitlog.set_log_dir(log_dir)
    fitlog.add_hyper({
            "epochs": n_epochs,
            "learning_rate": lr,
            "warmup": args.warmup_steps,
            "weight_decay": weight_decay,
            "batch_size": batch_size,
            "train_split_percentage": args.train_pct,
            "bert_model": bert_model,
            "seed": seed,
            "pretrained_model": args.pretrained_model,
            "tags": ",".join(args.tags)
        }, name=args.run_name)

    # Three NLI classes (the config above was created with num_labels=2 and is
    # overridden here -- presumably entailment/neutral/contradiction).
    bert_config.num_labels = 3
    bert_config.hidden_act = "relu"
    # Create the model
    if args.full_bert:
        bert = BertForSequenceClassification.from_pretrained(
                        bert_model, config=bert_config).to(device) if args.bertPath is None \
                else BertForSequenceClassification.from_pretrained(
                        args.bertPath, config=bert_config).to(device)
    else:
        bert = DistilBertForSequenceClassification.from_pretrained(
                    bert_model, config=bert_config).to(device) if args.distillBertPath is None \
                else DistilBertForSequenceClassification.from_pretrained(
                        args.distillBertPath, config=bert_config).to(device)

    model = VanillaBert(bert).to(device)
    state_dicts = torch.load("./train_basic/model_None.pth")
    model.load_state_dict(state_dicts)

    # The MultiNLI matched dev set, restricted to one genre, plays the role of
    # the weakly-labeled target-domain set.
    test_set = NLIDataset("../../../multinli_1.0/multinli_1.0_dev_matched.jsonl", tokenizer=tokenizer)
    test_set.domainSelect(2)
    test_label = test_set.labelTensor()
    e, p, l = WeakLabeling(model, test_set)
    # WeakLabeling has overwritten the dataset's labels with the model's
    # pseudo labels, so this measures weak-labeling accuracy vs. saved gold.
    print("acc:", acc_P_R_F1(test_label, test_set.labelTensor()))

    few_shot_set = NLIDataset("../../../multinli_1.0/Domain_slate.jsonl",
                              tokenizer=tokenizer, max_data_size=40)
    DVA = MetaEvaluator(model, test_set, few_shot_set,
                        test_label, exp_idxs=[], weak_set_weights=None, convey_fn=None,
                        lr4model=5e-5, scale_lr4model=1, coeff4expandset=1.0,
                        batch_size=20)
    expand_idxs, valid_idxs = DVA.ValidIndicesOut(max_epochs=1, max_meta_steps=10, lr4weights=0.1, pseaudo_idxs=None)
    weak_label = test_set.labelTensor()
    # Report accuracy of the weak labels overall and on each selection.
    print("init acc:", acc_P_R_F1(test_label, weak_label))
    print("expand_idxs acc:", acc_P_R_F1(test_label[expand_idxs], weak_label[expand_idxs]))
    print("valid_idxs acc:", acc_P_R_F1(test_label[valid_idxs], weak_label[valid_idxs]))
    pos_weight_idxs = DVA.weak_set_weights.__gt__(0)
    print("pos_weights acc:", acc_P_R_F1(test_label[pos_weight_idxs], weak_label[pos_weight_idxs]))
    logFile = str(__file__).replace(".py", ".log")
    init_haccs, optim_haccs = readLog(logFile)
    print("optimized top-half accuracy:", np.mean(optim_haccs))