import sys
sys.path.append("../../")
import fitlog
from transformers import AdamW
from transformers import DistilBertConfig
from transformers import DistilBertTokenizer
from transformers import BertTokenizer, BertConfig
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, precision_recall_fscore_support
from datareader import *
from metrics import *
from model import *
import pickle
from tqdm import trange, tqdm
from InstanceReweighting import MetaEvaluatorV2
from torch.utils.data import WeightedRandomSampler
import math

def predLogits(model:VanillaBert, data, idxs=None, batchSize=20):
    """Run ``model.predict`` over ``data`` in mini-batches (no grad).

    idxs selects which examples to score (defaults to all of them);
    returns the per-batch predictions concatenated along dim 0.
    """
    if idxs is None:
        idxs = list(range(len(data)))
    chunks = []
    with torch.no_grad():
        for start in trange(0, len(idxs), batchSize):
            batchIdxs = idxs[start:start + batchSize]
            batch = data.collate_raw_batch([data[i] for i in batchIdxs])
            chunks.append(model.predict(batch))
    return torch.cat(chunks)

def prediction(model:VanillaBert, data, idxs=None, batchSize=20):
    """Return (predicted class index, its score) per example.

    Only the top-1 entry of each row is needed, so use ``max(dim=1)``
    instead of sorting every row (the original sorted all classes and took
    the last column).  Tie-breaking among equal maxima was unspecified
    before (sort order) and remains unspecified with ``max``.
    """
    predTensor = predLogits(model, data, idxs, batchSize)
    topVals, topIdxs = predTensor.max(dim=1)
    return topIdxs, topVals

def accPRF1(yTrue, yPred):
    """Return (accuracy, (precision, recall, f1, support)) for the labels.

    ``yPred`` is a torch tensor (possibly on GPU); move it to CPU once
    instead of twice as the original did.
    """
    yPredCpu = yPred.cpu()
    return accuracy_score(yTrue, yPredCpu), \
                precision_recall_fscore_support(yTrue, yPredCpu)

def Perf(model:VanillaBert, data, label, idxs=None, batchSize=20):
    """Evaluate ``model`` on ``data`` (restricted to ``idxs`` if given)
    against ``label``; returns accPRF1's (acc, (p, r, f1, support))."""
    predicted, _scores = prediction(model, data, idxs=idxs, batchSize=batchSize)
    gold = label if idxs is None else label[idxs]
    return accPRF1(gold, predicted)

def WeakLabeling(model:VanillaBert, data:NLIDataset, pseaudoIndexs=[], batchSize=20):
    """Pseudo-label every example NOT already in ``pseaudoIndexs``.

    Writes hard labels, top-1 confidence, and prediction entropy back into
    ``data`` for the candidate indices, and returns
    (entropy, hard labels, top-1 confidence) as CPU tensors.
    """
    candidates = list(set(range(len(data))) - set(pseaudoIndexs))
    scores = predLogits(model, data, idxs=candidates, batchSize=batchSize)
    sortedProbs, sortedLabels = scores.sort(dim=1)
    hardLabels = sortedLabels[:, -1].cpu().numpy()
    data.setLabel(hardLabels, candidates)
    # Entropy of the predicted distribution: sum of p * |log p| per row
    # (assumes rows are probabilities in (0, 1] — TODO confirm on predict()).
    entrophy = torch.zeros([len(data)], device=model.device)
    entrophy[candidates] = (sortedProbs.log().abs() * sortedProbs).sum(dim=1)
    data.setConfidence(sortedProbs[:, -1].cpu(), candidates)
    data.setEntrophy(entrophy.cpu(), candidates)
    return entrophy.cpu(), sortedLabels[:, -1].cpu(), sortedProbs[:, -1].cpu()

def expandPseaudoSet(model:VanillaBert, unlabeled:NLIDataset, skipIdxs=None, threshold=0.95, maxCnt=50):
    """Pick examples whose fresh prediction agrees with the stored label and
    where BOTH confidences exceed ``threshold``; cap the result at ``maxCnt``
    keeping the highest harmonic-mean confidence.  Returns a list of indices.
    """
    if skipIdxs is None:
        candidates = list(range(len(unlabeled)))
    else:
        candidates = list(set(range(len(unlabeled))) - set(skipIdxs))
    modelPred, modelConf = prediction(model, unlabeled, candidates)
    storedPred = unlabeled.labelTensor()[candidates].to(modelPred.device)
    storedConf = unlabeled.confidence[candidates].to(modelConf.device)
    # Keep only examples where both sources agree and are confident.
    agree = modelPred == storedPred
    bothConfident = agree & (modelConf > threshold) & (storedConf > threshold)
    expandIdxs = torch.tensor(candidates, device=bothConfident.device)[bothConfident]
    if len(expandIdxs) > maxCnt:
        # Harmonic mean of the two confidences ranks the candidates.
        harmonicConf = 2 * storedConf * modelConf / (storedConf + modelConf)
        keep = harmonicConf[bothConfident].argsort()[-maxCnt:]
        return expandIdxs[keep].tolist()
    return expandIdxs.tolist()

def DataIter(dataset, batchsize, validIdxs=None):
    """Yield collated mini-batches of ``dataset``, optionally restricted to
    ``validIdxs`` (list or tensor); the last batch may be short."""
    if validIdxs is None:
        indices = torch.arange(len(dataset))
    elif isinstance(validIdxs, torch.Tensor):
        indices = validIdxs
    else:
        indices = torch.tensor(validIdxs)
    total = len(indices)
    start = 0
    while start < total:
        chunk = indices[start:start + batchsize]
        yield dataset.collate_raw_batch([dataset[idx] for idx in chunk])
        start += batchsize

def balancedDataIter(dataset, batchsize, validIdxs=None):
    """Yield class-balanced batches (half positive, half negative) from
    ``dataset``, oversampling the minority class to match the majority.

    BUGFIX: the original indexed the label tensor with ``validIdxs`` BEFORE
    handling the ``None`` default; ``tensor[None]`` inserts a new dimension
    instead of selecting all rows, breaking the boolean masks below.
    Normalize ``validIdxs`` first, then slice the labels.
    """
    if validIdxs is None:
        validIdxs = torch.arange(len(dataset))
    elif not isinstance(validIdxs, torch.Tensor):
        validIdxs = torch.tensor(validIdxs)
    labels = torch.tensor(dataset.label)[validIdxs]
    posIdxs = validIdxs[labels == 1].tolist()
    negIdxs = validIdxs[labels == 0].tolist()
    # Oversample the minority class: whole repeats plus a random remainder.
    # NOTE(review): still raises ZeroDivisionError if one class is absent,
    # same as the original — confirm callers guarantee both classes appear.
    if len(posIdxs) > len(negIdxs):
        maxSize = len(posIdxs)
        negIdxs = negIdxs * (len(posIdxs) // len(negIdxs)) + \
                        random.sample(negIdxs, len(posIdxs) % len(negIdxs))
    else:
        maxSize = len(negIdxs)
        posIdxs = posIdxs * (len(negIdxs) // len(posIdxs)) + \
                        random.sample(posIdxs, len(negIdxs) % len(posIdxs))
    half = batchsize // 2
    for i in range(0, maxSize, half):
        trainingIdxs = posIdxs[i:i + half] + negIdxs[i:i + half]
        yield dataset.collate_raw_batch([dataset[j] for j in trainingIdxs])


def AugLoss(model:VanillaBert, batch):
    """Compute (loss, acc) under a randomly chosen embedding augmentation.

    With probability 0.2 each, the embedding layer's ``aug_type`` is set to
    one of: "gaussian", "g_blur", "adver", "rMask", "rReplace" before the
    loss is computed.  The "adver" branch first runs a clean forward/backward
    pass and calls PreserveGrad() — presumably stashing the clean gradients
    for the adversarial augmentation to use (TODO confirm in model.py); note
    that extra backward() accumulates into .grad until the caller zeroes it.
    """
    rand = random.random()
    if rand < 0.2:
        model.bert.bert.embeddings.aug_type = "gaussian"
    elif rand < 0.4:
        model.bert.bert.embeddings.aug_type = "g_blur"
    elif rand < 0.6:
        # Adversarial branch: clean pass + backward before switching modes.
        model.bert.bert.embeddings.aug_type = None
        loss, acc = model.lossAndAcc(batch)
        loss.backward()
        model.bert.bert.PreserveGrad()
        model.bert.bert.embeddings.aug_type = "adver"
    elif rand < 0.8:
        model.bert.bert.embeddings.aug_type = "rMask"
    else:
        model.bert.bert.embeddings.aug_type = "rReplace"
    # Loss under whichever augmentation mode was selected above.
    loss, acc = model.lossAndAcc(batch)
    return loss, acc

def ModelTrain(trModel, weakSet, weakSetLabel, maxIter, validIndices, indicesProbs,
               batchSize=32, learningRate=5e-4, threshold=0.2):
    """Fine-tune ``trModel`` on ``weakSet`` with weighted sampling + AugLoss.

    validIndices: candidate example indices; indicesProbs: their sampling
    weights.  Stops early when the mean loss over one "epoch" of steps drops
    below ``threshold``.
    """
    print("train set perf:", accPRF1(weakSetLabel[validIndices],
                                        torch.tensor(weakSet.dataY).argmax(dim=1)[validIndices]))
    lossList = []
    optim = torch.optim.Adam([
        {'params': trModel.parameters(), 'lr': learningRate}
    ])
    # Evaluate the mean loss once per "epoch" of steps.  BUGFIX: guard the
    # divisor — when len(validIndices) < batchSize the original modulus was
    # zero and raised ZeroDivisionError on the first step.
    evalEvery = max(1, len(validIndices) // batchSize)
    for step in range(maxIter):
        batchIdxs = [validIndices[idx] for idx in WeightedRandomSampler(indicesProbs,
                                                                        batchSize,
                                                                        replacement=False)]
        batch = weakSet.collate_raw_batch([weakSet[i] for i in batchIdxs])
        loss, acc = AugLoss(trModel, batch)
        # zero_grad AFTER AugLoss on purpose: its "adver" branch accumulates
        # an extra backward pass that must not leak into this step.
        optim.zero_grad()
        loss.backward()
        optim.step()
        torch.cuda.empty_cache()
        print('####Model Update (%3d | %3d) ####, loss = %6.8f' % (
            step, maxIter, loss.data
        ))
        lossList.append(loss.data.item())
        if (step+1) % evalEvery == 0:
            meanLoss = np.mean(lossList)
            lossList = []
            print("mean loss:", meanLoss)
            if meanLoss < threshold:  # early stop
                break

def obtainModel(tfidfVec):
    """Placeholder — presumably meant to build a model from a TF-IDF
    vectorizer; not implemented in this file."""
    pass

def obtainDomainSet(fsPrefix, odPrefix, ndPrefix):
    """Placeholder — presumably meant to load few-shot / old-domain /
    new-domain datasets by path prefix; not implemented in this file."""
    pass

def Convert2BiGCNFormat(data):
    """Placeholder — presumably meant to convert ``data`` to a BiGCN input
    format; not implemented in this file."""
    pass

class MetaSelfTrainer(MetaEvaluatorV2):
    """Meta-learning self-trainer that reweights weakly-labeled instances.

    Per-example weights on the weak set are meta-optimized so that one
    weighted gradient step on the weak-set loss improves loss/accuracy on a
    small trusted few-shot set (see OptimizeWeights / Training).

    Fixes over the original:
      * ``LogSelectionInfo`` indexed the bound method ``labelTensor`` instead
        of calling it (it is invoked as ``labelTensor()`` elsewhere).
      * ``Training`` never cleared gradients before its optimizer step, so
        gradients left behind by ``OptimizeWeights`` leaked into every update.
    """

    def __init__(self, model:VanillaBert, weakSet, fewShotSet, weakSetLabel,
                    expIdxs=[], weakSetWeights=None, conveyFn=None, lr4model=2e-2,
                        coeff4expandset=1.0, maxFewShotSize=20, batchSize=5):
        # Map the camelCase arguments onto the snake_case superclass interface.
        super(MetaSelfTrainer, self).__init__(model, weakSet, fewShotSet, weakSetLabel, exp_idxs=expIdxs,
                                                weak_set_weights=weakSetWeights, convey_fn=conveyFn, lr4model=lr4model,
                                                coeff4expandset=coeff4expandset, max_few_shot_size=maxFewShotSize,
                                                batch_size=batchSize)
        # Batches promoted into the training pool; only initialized here.
        self.expandBatch = []

    def weightsLR(self, eta=0.01, grad_weights=None):
        """Scale ``eta`` by the inverse mean |gradient| so the weight update
        has roughly magnitude ``eta`` regardless of gradient scale."""
        return eta / grad_weights.abs().mean()

    def LogSelectionInfo(self, eArr, validIdxs=None):
        """Print diagnostics about which weak-set examples are selected.

        eArr: per-example entropy tensor.  validIdxs: explicit selection;
        defaults to examples whose meta-learned weight is positive.
        """
        indices = torch.arange(len(self.weak_set))
        print(">>>>>>>MetaEvaluate Message>>>>>>>>>>>>>>>")
        posIndices = validIdxs if validIdxs is not None else indices[self.weak_set_weights > 0.0]
        # BUGFIX: labelTensor is a method (called as labelTensor() at its
        # other call sites in this file); the original indexed the bound
        # method object itself, which raises TypeError.
        labels, preds = self.weak_set_label[indices], self.weak_set.labelTensor()[indices]
        print(len(indices))
        print(len(posIndices))
        print(eArr.mean(), eArr[posIndices].mean())
        print("####Selection Acc", accuracy(labels, preds), accuracy(labels[posIndices], preds[posIndices]))
        print("####init metric:", precision_recall_fscore_support(labels, preds))
        print("####valid metric:", precision_recall_fscore_support(labels[posIndices], preds[posIndices]))
        print("<<<<<<<<<<<<<<<<<MetaEvaluate Message<<<<<<<<<<<<")

    def PGDPerturbation(self, batch, KStep=5):
        """PGD-style adversarial perturbation of the word-embedding table.

        Runs ``KStep`` gradient-ascent steps on the embedding weights,
        projecting back onto an L2 ball whose radius is the first step's
        gradient norm, then returns ``{param_name: grad.clone()}`` of the
        loss gradients at the perturbed point.

        NOTE(review): the embedding table is left perturbed on return, and
        ``p.grad`` may be None for parameters the loss never touched —
        confirm both are intentional/safe for the callers.
        """
        self.model.bert.bert.embeddings.aug_type = None
        maxRadius = 1.0
        initEmbTensor = self.model.bert.bert.embeddings.word_embeddings.weight.data.clone()
        for step in range(KStep):
            loss, acc = self.model.lossAndAcc(batch)
            self.model.zero_grad()
            loss.backward()
            embGrad = self.model.bert.bert.embeddings.word_embeddings.weight.grad.clone()
            if step == 0:
                # Perturbation radius fixed from the first gradient's norm.
                maxRadius = embGrad.norm()
            # Ascent step on the embedding table.
            self.model.bert.bert.embeddings.word_embeddings.weight.data += 1e-3 * embGrad
            delta = self.model.bert.bert.embeddings.word_embeddings.weight.data - initEmbTensor
            if torch.norm(delta) > maxRadius:
                # Project back onto the L2 ball of radius maxRadius.
                delta = maxRadius * delta/delta.norm()
                self.model.bert.bert.embeddings.word_embeddings.weight.data = initEmbTensor + delta
        loss, acc = self.model.lossAndAcc(batch)
        self.model.zero_grad()
        loss.backward()
        grad_dicts = {n: p.grad.clone() for n, p in self.model.named_parameters()}
        return grad_dicts

    def PseudoDataList(self, threshold=-1):
        """Select the lowest-entropy weak-set examples as pseudo data.

        threshold < 0: take the lowest-entropy 10%; otherwise take examples
        with entropy <= threshold.  Either way the count is rounded down to a
        multiple of max_few_shot_size when it exceeds that size.

        Returns (idxs, pseudoBatch, pseudoDataList): a single collated batch
        when the selection fits in max_few_shot_size, else a list of batches
        (the unused slot is None in each case).
        """
        assert hasattr(self.weak_set, "entrophy")
        entropyTensor = self.weak_set.entrophy
        val, index = entropyTensor.sort()
        if threshold < 0:
            topK = int(len(self.weak_set)*0.1)
            topK = topK if topK < self.max_few_shot_size else \
                        (topK//self.max_few_shot_size)*self.max_few_shot_size
            idxs = index[:topK]
        else:
            idxs = index[val <= threshold]
            topK = len(idxs) if len(idxs) < self.max_few_shot_size else \
                                (len(idxs) // self.max_few_shot_size) * self.max_few_shot_size
            idxs = idxs[:topK]

        if len(idxs) > self.max_few_shot_size:
            pseudoBatch = None
            pseudoDataList = [self.weak_set.collate_raw_batch(
                [self.weak_set[idxs[j]] for j in range(start_idx, min(start_idx + self.max_few_shot_size, len(idxs)))]
            ) for start_idx in range(0, len(idxs), self.max_few_shot_size)]
        else:
            pseudoBatch = self.weak_set.collate_raw_batch(
                [self.weak_set[idx] for idx in idxs]
            )
            pseudoDataList = None
        return idxs, pseudoBatch, pseudoDataList

    def AdversarialPseudoGrads(self, PGDStep=5, pseudoData=None, pseudoDataList=None):
        """Average PGDPerturbation gradients over the pseudo data.

        Accepts either one batch (pseudoData) or a list of batches; with a
        list, the per-batch gradient dicts are summed then divided by the
        number of batches.
        """
        assert pseudoData is not None or pseudoDataList is not None
        if pseudoDataList is None:
            gradDicts = self.PGDPerturbation(pseudoData, PGDStep)
        else:
            gradDicts = {}
            for i, pData in enumerate(pseudoDataList):
                tmpGradDicts = self.PGDPerturbation(pData, PGDStep)
                if len(gradDicts) == 0:
                    gradDicts = tmpGradDicts
                else:
                    gradDicts = {name:gradDicts[name]+tmpGradDicts[name] for name in list(gradDicts.keys())}
            gradDicts = {name:gradDicts[name]/(i+1) for name in list(gradDicts.keys())}
        return gradDicts

    def OptimizeWeights(self, weightEta, batch, indices, fewShotData=None, fewShotDataList=None, maxMetaStep=10):
        """Meta-optimize the weights of the examples in ``batch``.

        Each meta step: take a weighted gradient step on the batch loss,
        measure loss/acc on the few-shot set, roll the model back, then move
        the weights along the (normalized, negated) weight-gradient.  Keeps
        and finally restores the weights with the best few-shot accuracy.
        """
        minLoss, bestAcc = 1e8, 0.0
        # Snapshot the model so every meta step starts from the same point.
        initStateDicts = self.model.state_dict()
        initStateDicts = {key: initStateDicts[key].clone() for key in initStateDicts}
        bestWeights = self.weak_set_weights[indices].clone()
        for step in range(maxMetaStep):
            self.model.zero_grad()
            loss = self.LossList(batch)
            sumLoss = (self.weak_set_weights[indices] * loss).sum()
            sumLoss.backward()
            self.model_optim.step()
            self.val_grad_dicts, fewLoss, fewAcc = self.meanGradOnValSet(few_shot_data=fewShotData,
                                                                           few_shot_data_list=fewShotDataList)
            # Roll the model back to the snapshot.
            self.model.load_state_dict(initStateDicts)

            if fewAcc > bestAcc:
                bestWeights = self.weak_set_weights[indices].clone()
                bestAcc = fewAcc
            print(f"##Perf on Meta Val Set## {step} | {maxMetaStep} :  \
                    loss/acc = {fewLoss}/{fewAcc} | best acc = {bestAcc} ")

            # Redundant with the restore above (nothing touched the model in
            # between) but kept for safety against superclass side effects.
            self.model.load_state_dict(initStateDicts)
            grads = self.ComputeGrads4Weights(batch, fewShotData, fewShotDataList)
            # Normalized descent direction on the weights.
            weightGrads = -1 * (grads / grads.norm(2))
            update = weightEta * weightGrads
            self.weak_set_weights[indices] = self.weak_set_weights[indices] - update
        self.weak_set_weights[indices] = bestWeights
        return bestWeights

    def Training(self, maxEpochs, batchSize=32, maxMetaSteps=50,
                    trainLr=5e-5, lr4weights=1.0):
        """Outer loop: per batch, meta-optimize the instance weights, then
        take one weighted optimizer step with layer-wise LR decay (deeper
        transformer layers keep a larger LR; embeddings get the smallest)."""
        optimizerGroupedParameters = [
            {'params': p,
             'lr': trainLr * pow(0.8, 12 - int(n.split("layer.")[1].split(".", 1)[0])) if "layer." in n \
                 else (trainLr * pow(0.8, 13) if "embedding" in n else trainLr) \
             # layer-wise fine-tuning
             } for n, p in self.model.named_parameters()
        ]
        optim = torch.optim.Adam(optimizerGroupedParameters)
        fewShotData, fewShotDataList = self.FewShotDataList()
        self.weak_set_weights = torch.zeros(len(self.weak_set), device=self.device)
        train_idxs = list(range(len(self.weak_set)))
        for epoch in range(maxEpochs):
            # Duplicate the shuffled list so the final slice is never short.
            shuffled_indices = random.sample(train_idxs, len(train_idxs)) * 2
            for i in range(0, len(train_idxs), batchSize):
                batch_idxs = shuffled_indices[i:i+batchSize]
                batch, indices = self.InnerBatch(batch_idxs, device=self.device)
                batch_weights = self.OptimizeWeights(lr4weights, batch, indices, fewShotData,
                                                     fewShotDataList, maxMetaSteps)
                loss = self.LossList(batch)
                sumLoss = (batch_weights * loss).sum()
                # BUGFIX: clear gradients left behind by OptimizeWeights (and
                # previous batches) before this optimizer step.
                optim.zero_grad()
                sumLoss.backward()
                optim.step()

def testModel(model, testLoader, testSuffix):
    """Evaluate ``model`` on ``testLoader`` and log the five metrics to
    fitlog, both as best metrics (key ``Original<suffix>``) and as step-0
    metrics (key ``<suffix>``)."""
    rstModel = model.valid(testLoader, allMetrics=True)
    print(f"Original Performance of {testSuffix}:", rstModel)
    # valid() returns (acc, loss, precision, recall, f1) in this order.
    metrics = {"validAcc": rstModel[0],
               "validLoss": rstModel[1],
               "validPrec": rstModel[2],
               "validRecall": rstModel[3],
               "validF1": rstModel[4]}
    fitlog.add_best_metric({f"Original{testSuffix}": dict(metrics)})
    fitlog.add_metric({f"{testSuffix}": dict(metrics)}, step=0)


def SelfTrain(trModel, pseaudoSet, weakSetLabel, trModelSuffix, trainIter):
    """One self-training round: fit ``trModel`` on its own pseudo-labeled set
    (uniform sampling weights) and log validation + pseudo-label metrics.

    Fixes over the original:
      * ``Perf`` returns a 2-tuple (acc, (p, r, f1, support)); the original
        indexed it like a flat 5-tuple (``rstModel1[2]`` …) → IndexError.
        We unpack it properly (mirroring MetaSelfTrain) and drop the
        "validLoss" key, for which no value exists.
      * ``accPRF1`` yields FOUR arrays from precision_recall_fscore_support;
        the original unpacked them into three names → ValueError.
      * The progress print passed an unformatted "%3d | %3d" literal.
    """
    ModelTrain(trModel, pseaudoSet, weakSetLabel, 10*len(pseaudoSet),
               list(range(len(pseaudoSet))), [1.0/len(pseaudoSet)]*len(pseaudoSet),
               batchSize=32, learningRate=5e-5, threshold=0.1)
    rstModel1 = Perf(trModel, pseaudoSet, weakSetLabel)
    print(f"Post-SelfTrain Performance of {trModelSuffix}:", rstModel1)
    accV, (pV, rV, f1V, _support) = rstModel1
    accS, (pS, rS, f1S, _supportS) = accPRF1(weakSetLabel,
                                             pseaudoSet.labelTensor())
    fitlog.add_metric({f"{trModelSuffix}":
                           {"validAcc": accV,
                            "validPrec": pV.mean(),
                            "validRecall": rV.mean(),
                            "validF1": f1V.mean(),
                            "pseaudoAcc": accS, "pseaudoSrec": pS[1],
                            "pseaudoRecall": rS[1], "pseaudoF1": f1S[1],
                            }}, step=trainIter)

def MetaSelfTrain(bertModel, annoModelPath, trModelPath, fSet, weakSet,
                  weakSetLabel, pIdxs, eIdxs, trModelSuffix, trainIter):
    """One meta-self-training round for ``bertModel``.

    Loads the annotator checkpoint to pseudo-label ``weakSet``, then loads
    the trainee checkpoint and runs MetaSelfTrainer.Training with the
    few-shot set ``fSet`` as the meta-validation signal; finally evaluates
    on ``weakSet`` and logs metrics under ``trModelSuffix``.

    Returns ``pIdxs`` unchanged (kept for caller-side bookkeeping).

    Fixes over the original: L365 called ``WeakLabeling(model, …)`` with the
    module-level global instead of the freshly-loaded ``bertModel`` argument,
    and the progress print passed an unformatted "%3d | %3d" literal.
    """
    stateDicts = torch.load(annoModelPath)
    bertModel.load_state_dict(stateDicts)
    entrophy, pseaudoLabels, logits = WeakLabeling(bertModel, weakSet)
    stateDicts = torch.load(trModelPath)
    bertModel.load_state_dict(stateDicts)
    IRWeighting = MetaSelfTrainer(bertModel, weakSet, fSet, weakSetLabel,
                                   expIdxs=eIdxs, conveyFn=None, lr4model=5e-5,
                                    batchSize=20)
    IRWeighting.Training(1, batchSize=32, maxMetaSteps=20,
                            trainLr=5e-5, lr4weights=10.0)
    rstModel1 = Perf(bertModel, weakSet, weakSetLabel)
    print(f"Post-MetaTrain Performance of {trModelSuffix}:", rstModel1)
    accV, (pV, rV, f1V, _ ) = rstModel1
    fitlog.add_metric({f"{trModelSuffix}":
                           {"validAcc": accV, "validPrec": pV.mean(),
                            "validRecall": rV.mean(), "validF1": f1V.mean(),
                            }}, step=trainIter)

    return pIdxs


def MetaSelfTrain_ET():
    """Placeholder variant of MetaSelfTrain (unimplemented)."""
    pass

def MetaSelfTrain_EF():
    """Placeholder variant of MetaSelfTrain (unimplemented)."""
    pass

if __name__ == "__main__":
    # Load pre-pickled experiment arguments produced by an upstream script.
    with open("../../args.pkl", 'rb') as fr:
        args = pickle.load(fr)
    # Log/model directory named after this script.
    # NOTE(review): rstrip(".py") strips CHARACTERS, not a suffix — a script
    # name ending in 'p', 'y' or '.' loses extra letters; Path(__file__).stem
    # is presumably what was intended.
    args.model_dir = str(__file__).rstrip(".py")
    # args.model_dir = "./tmp/"
    # Set all the seeds
    seed = args.seed
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    # See if CUDA available
    device = torch.device("cpu")
    if args.n_gpu > 0 and torch.cuda.is_available():
        print("Training on GPU")
        device = torch.device("cuda:0")

    # model configuration
    batch_size = args.batch_size
    lr_M = args.lr
    weight_decay = args.weight_decay
    n_epochs = args.n_epochs
    # Force full BERT (not DistilBERT) regardless of the pickled args.
    args.full_bert = True
    args.bertPath = "../../../bert_en/"

    args.full_bert = True
    print("====>", args.full_bert)
    bert_model = 'bert-base-uncased' if args.full_bert else 'distilbert-base-uncased'
    # Prefer a local checkpoint directory (bertPath) over the hub name.
    if args.full_bert:
        bert_config = BertConfig.from_pretrained(bert_model, num_labels=2) if args.bertPath is None else \
                        BertConfig.from_pretrained(args.bertPath, num_labels=2)
        tokenizer_M = BertTokenizer.from_pretrained(bert_model) if args.bertPath is None else \
                        BertTokenizer.from_pretrained(args.bertPath)
    else:
        bert_config = DistilBertConfig.from_pretrained(bert_model, num_labels=2) if args.distillBertPath is None else \
                        DistilBertConfig.from_pretrained(args.distillBertPath, num_labels=2)
        tokenizer_M = DistilBertTokenizer.from_pretrained(bert_model) if args.distillBertPath is None else \
                        DistilBertTokenizer.from_pretrained(args.distillBertPath)

    # Recreate the log directory from scratch on every run.
    # NOTE(review): shelling out for mkdir/rm works only on POSIX and ignores
    # errors — os.makedirs/shutil.rmtree would be the portable choice.
    log_dir = args.model_dir
    if not os.path.exists(log_dir):
        os.system("mkdir %s" % log_dir)
    else:
        os.system("rm -rf %s" % log_dir)
        os.system("mkdir %s" % log_dir)
    fitlog.set_log_dir(log_dir)
    fitlog.add_hyper({
            "epochs": n_epochs,
            "learning_rate": lr_M,
            "warmup": args.warmup_steps,
            "weight_decay": weight_decay,
            "batch_size": batch_size,
            "train_split_percentage": args.train_pct,
            "bert_model": bert_model,
            "seed": seed,
            "pretrained_model": args.pretrained_model,
            "tags": ",".join(args.tags)
        }, name=args.run_name)

    # 3-way NLI head (entailment/neutral/contradiction) on top of the config
    # that was loaded with num_labels=2 above.
    bert_config.num_labels = 3
    bert_config.hidden_act = "relu"
    # Create the model
    if args.full_bert:
        bert = BertForSequenceClassification.from_pretrained(
                        bert_model, config=bert_config).to(device) if args.bertPath is None \
                else BertForSequenceClassification.from_pretrained(
                        args.bertPath, config=bert_config).to(device)
    else:
        bert = DistilBertForSequenceClassification.from_pretrained(
                    bert_model, config=bert_config).to(device) if args.distillBertPath is None \
                else DistilBertForSequenceClassification.from_pretrained(
                        args.distillBertPath, config=bert_config).to(device)

    model = VanillaBert(bert).to(device)
    # Two SNLI-pretrained checkpoints used alternately as annotator/trainee.
    model1_path = "../../saved/modelSNLI_1.pth"
    model2_path = "../../saved/modelSNLI_2.pth"

    # Target domain: full set as the weak/test set, plus a 100-example
    # trusted few-shot set for the meta signal.
    domain_id = 2
    NLI_domain_list = list(NLI_domain_map.keys())
    new_domain_name = NLI_domain_list[domain_id-1]
    test_set = NLIDataset(f"../../../multinli_1.0/Domain_{new_domain_name}.jsonl", tokenizer=tokenizer_M)
    new_domain_label = test_set.labelTensor()
    few_shot_set = NLIDataset(f"../../../multinli_1.0/fewshot_Domain_{new_domain_name}.jsonl",
                              tokenizer=tokenizer_M, max_data_size=100)

    # Co-training loop: each model pseudo-labels for the other in turn.
    # NOTE(review): MetaSelfTrain returns pIdxs but the result is discarded,
    # so pseaudo_idxs/expand_set_idxs stay empty forever; train_iter is also
    # unused — confirm whether accumulation was intended here.
    train_iter = 0
    pseaudo_idxs, expand_set_idxs = [], []
    for train_it in range(60):
        MetaSelfTrain(model, model2_path, model1_path, few_shot_set, test_set,
                        new_domain_label, pseaudo_idxs, expand_set_idxs, f"model1_{new_domain_name}", train_it)
        MetaSelfTrain(model, model1_path, model2_path, few_shot_set, test_set,
                      new_domain_label, pseaudo_idxs, expand_set_idxs, f"model2_{new_domain_name}", train_it)