import sys
sys.path.append("../../")
from datareader import NLI_domain_map
from MetaSelfTrainFramewoks import MetaSelfTrainV9
import fitlog, pickle, random, numpy as np
import torch
from model import VanillaBert
from datareader import NLIDataset
from NLI_Utils import reconfig_args, obtain_CRST_set, obtain_model

def Generator1(pseudo_target, labeled_source, labeled_target, batchSize):
    """Yield mixed batches of pseudo-target, labeled-source and labeled-target data.

    Each yielded tuple is (mixed_batch, global_indices, extra_pseudo_batch),
    where global_indices address a weight vector laid out as
    [pseudo_target | labeled_source | labeled_target].
    """
    print("Generator1")
    # Lazily create the sampling index lists on first use.
    if not hasattr(pseudo_target, 'valid_indexs'):
        pseudo_target.valid_indexs = list(range(len(pseudo_target)))
    if not hasattr(labeled_source, 'valid_indexs'):
        labeled_source.valid_indexs = list(range(len(labeled_source)))
    if not hasattr(labeled_target, 'valid_indexs'):
        labeled_target.valid_indexs = list(range(len(labeled_target)))
    # Base offsets into the global weight vector.
    # Fixed NameError: the original referenced the undefined name `labeledSource`.
    LS_base = len(pseudo_target)
    LT_base = len(pseudo_target) + len(labeled_source)
    # Double the shuffled lists so slices can safely run past one epoch.
    idxsLS = random.sample(labeled_source.valid_indexs, len(labeled_source.valid_indexs)) * 2
    idxsPT = random.sample(pseudo_target.valid_indexs, len(pseudo_target.valid_indexs)) * 2
    # At most 5 labeled-target examples per batch.
    bs_LT = min(5, len(labeled_target.valid_indexs))
    for i in range(0, len(pseudo_target.valid_indexs), batchSize//2 + batchSize):
        # i could be larger than len(labeled_source), thus start from the remainder
        start_LS, start_PT = i % len(labeled_source.valid_indexs), i
        end_LS, end_PT = start_LS + batchSize//2, (start_PT + batchSize//2) - bs_LT
        start_PT2, end_PT2 = end_PT, end_PT + batchSize
        items1 = [labeled_source[jj] for jj in idxsLS[start_LS:end_LS]]
        items2 = [pseudo_target[jj] for jj in idxsPT[start_PT:end_PT]]
        # Fixed: sample *indices* from valid_indexs (the original sampled raw
        # dataset items and then used them as indices below).
        idxs_LT = random.sample(labeled_target.valid_indexs, bs_LT)
        items3 = [labeled_target[jj] for jj in idxs_LT]
        items4 = [pseudo_target[jj] for jj in idxsPT[start_PT2:end_PT2]]
        yield labeled_source.collate_raw_batch(items1 + items2 + items3), \
              idxsPT[start_PT:end_PT] + [idx + LS_base for idx in idxsLS[start_LS:end_LS]] \
              + [idx + LT_base for idx in idxs_LT], \
              pseudo_target.collate_raw_batch(items4)

def Generator2(pseudo_target, labeled_source, batchSize):
    """Yield batches mixing labeled-source and pseudo-labeled-target examples.

    Each yielded tuple is (mixed_batch, global_indices, extra_pseudo_batch);
    global_indices address a weight vector laid out as
    [pseudo_target | labeled_source].
    """
    print("Generator2")
    # Lazily create the sampling index lists on first use.
    if not hasattr(pseudo_target, 'valid_indexs'):
        pseudo_target.valid_indexs = list(range(len(pseudo_target)))
    if not hasattr(labeled_source, 'valid_indexs'):
        labeled_source.valid_indexs = list(range(len(labeled_source)))
    source_offset = len(pseudo_target)
    # Shuffle once, then double the lists so slices never run off the end.
    source_order = random.sample(labeled_source.valid_indexs, len(labeled_source.valid_indexs)) * 2
    pseudo_order = random.sample(pseudo_target.valid_indexs, len(pseudo_target.valid_indexs)) * 2
    half = batchSize // 2
    n_source = len(labeled_source.valid_indexs)
    for pos in range(0, len(pseudo_target.valid_indexs), half + batchSize):
        # pos can exceed the source set size, so wrap via the remainder.
        src_lo = pos % n_source
        src_hi = src_lo + half
        pt_lo, pt_hi = pos, pos + half
        extra_lo, extra_hi = pt_hi, pt_hi + batchSize
        mixed = [labeled_source[j] for j in source_order[src_lo:src_hi]]
        mixed += [pseudo_target[j] for j in pseudo_order[pt_lo:pt_hi]]
        extra = [pseudo_target[j] for j in pseudo_order[extra_lo:extra_hi]]
        global_idxs = pseudo_order[pt_lo:pt_hi] \
                      + [j + source_offset for j in source_order[src_lo:src_hi]]
        yield labeled_source.collate_raw_batch(mixed), global_idxs, \
              pseudo_target.collate_raw_batch(extra)

def Generator3(pseudo_target, batchSize):
    """Yield (batch, indices_of_batch, extra_batch) pairs drawn solely from
    the pseudo-labeled target set."""
    print("Generator3")
    # Lazily create the sampling index list on first use.
    if not hasattr(pseudo_target, 'valid_indexs'):
        pseudo_target.valid_indexs = list(range(len(pseudo_target)))
    # Double the shuffled list so slices can safely overrun one epoch.
    order = random.sample(pseudo_target.valid_indexs, len(pseudo_target.valid_indexs)) * 2
    for lo in range(0, len(pseudo_target.valid_indexs), 2 * batchSize):
        mid = lo + batchSize
        hi = mid + batchSize
        first = pseudo_target.collate_raw_batch([pseudo_target[j] for j in order[lo:mid]])
        second = pseudo_target.collate_raw_batch([pseudo_target[j] for j in order[mid:hi]])
        yield first, order[lo:mid], second

def DataIter(unlabeled_target, labeled_source=None, labeled_target=None, batch_size=32):
    """Dispatch to the appropriate batch generator for the available datasets.

    Generator1 when both labeled source and target exist, Generator2 with only
    a labeled source, Generator3 with pseudo-labeled target data alone.

    Fixed NameError: the assert referenced the undefined name `labeledTarget`.
    """
    if labeled_target is not None:
        assert len(labeled_target) > 0
        return Generator1(unlabeled_target, labeled_source, labeled_target, batch_size)
    elif labeled_source is not None:
        return Generator2(unlabeled_target, labeled_source, batch_size)
    else:
        return Generator3(unlabeled_target, batch_size)

class MetaSelfTrainV11(MetaSelfTrainV9):
    """Meta self-training, variant 11.

    Extends V9 with: (1) layer-wise learning-rate decay in the model optimizer,
    (2) a few-shot meta-validation batch sampled fresh at every training step,
    and (3) a sliding window of the 10 most recent best meta-validation losses
    used as the baseline a new weight update must beat.
    """

    def __init__(self, seed, alpha, topk, class_num, log_dir, suffix, weight_eta=0.1, lr4model=2e-2,
                 coeff4expandset=1.0, max_few_shot_size=20, Inner_BatchSize=5, meta_step=5, extra_step=5):
        super(MetaSelfTrainV11, self).__init__(seed, alpha, topk, class_num, log_dir, suffix, weight_eta, lr4model,
                 coeff4expandset, max_few_shot_size, Inner_BatchSize, meta_step, extra_step)
        # Sliding window (<= 10 entries) of best meta-validation losses.
        self.minMetaLoss = []

    def obtainOptim(self, model, learning_rate):
        """Return an SGD optimizer with layer-wise learning-rate decay.

        Transformer layer k gets lr * 0.8**(12 - k) (deeper layers learn
        faster), embeddings get lr * 0.8**13, everything else the full rate.
        """
        grouped_parameters = []
        for name, param in model.named_parameters():
            if "layer." in name:
                layer_id = int(name.split("layer.")[1].split(".", 1)[0])
                lr = learning_rate * pow(0.8, 12 - layer_id)
            elif "embedding" in name:
                lr = learning_rate * pow(0.8, 13)
            else:
                lr = learning_rate
            grouped_parameters.append({'params': param, 'lr': lr})
        return torch.optim.SGD(grouped_parameters)

    def Batch2ExpandData(self, f_batch, e_batch):
        """Install the sampled few-shot batch and the expand (extra pseudo) batch.

        NOTE(review): e_batch[-2] is assumed to hold the expand batch's labels
        — TODO confirm against collate_raw_batch's output layout.
        """
        print("- - - - - update expand data - - ->")
        # Uniform per-class weight, normalized by the expand batch size.
        self.e_label_weight = torch.tensor(
            [1.0 / len(e_batch[-2]) for _ in range(self.class_num)],
            device=self.device
        )
        self.few_shot_data = f_batch
        self.expand_data_list = [e_batch]

    def FewShotDataList(self, few_shot_set):
        """Compute class-balanced label weights from the validation set.

        Returns (None, None): v11 does not pre-build a few-shot list, it
        samples a fresh few-shot batch every training step (see Training).
        """
        f_label = few_shot_set.labelTensor()
        f_label = f_label.argmax(dim=1) if f_label.dim() == 2 else f_label
        cnt = [f_label.eq(y_idx).sum().item() for y_idx in range(self.class_num)]
        assert sum(cnt) == len(few_shot_set)
        # Weight each class inversely to its frequency so classes contribute equally.
        self.f_label_weight = torch.tensor([1.0/(self.class_num*y_cnt) for y_cnt in cnt], device=self.device)
        return None, None

    def MetaStep(self, model:VanillaBert, optim:torch.optim, batch,
                    weight:torch.Tensor, few_data=None, few_data_list=None):
        """Tune per-example weights by descending the meta-validation loss.

        Runs up to self.meta_step updates on `weight`, evaluating the sigmoided
        weights on the few-shot set after each; keeps the best weights seen and
        restores the model's initial parameters before returning.
        Returns (steps_executed, best_weights_or_None).

        Fixed: the meta-validation calls used the module-level global `model1`
        instead of the `model` argument.
        """
        if (few_data is None) and (few_data_list is None):
            assert hasattr(self, "few_shot_data")
            assert hasattr(self, "few_shot_data_list")
            few_data, few_data_list = self.few_shot_data, self.few_shot_data_list

        self.epsilon = 1e-5
        # Snapshot parameters so every meta step starts from the same model.
        initStateDicts = model.state_dict()
        initStateDicts = {key: initStateDicts[key].clone() for key in initStateDicts}
        weight_best = None
        # Baseline: once 10 losses are recorded, an update must beat their mean.
        minMetaLoss = -1 if len(self.minMetaLoss) < 10 else np.mean(self.minMetaLoss)
        optim_step = self.meta_step
        for step in range(self.meta_step):
            u = weight.sigmoid()
            fewLoss, fewAcc = self.MetaValidation(model, optim, batch, u, few_data, few_data_list)
            if (step == 0) and (minMetaLoss==-1):
                minMetaLoss = fewLoss
            elif fewLoss < minMetaLoss:
                minMetaLoss = fewLoss
                weight_best = weight.clone()
                if optim_step == self.meta_step:
                    optim_step = step  # remember the first improving step

            print(f"##Perf on Meta Val Set## {step} | {self.meta_step} :  \
                        loss/acc = {fewLoss}/{fewAcc}, PreFewLoss={minMetaLoss}")
            if step == 0:
                fitlog.add_metric({"meta_valid_loss" : fewLoss}, step=self.counter)
                fitlog.add_metric({"meta_valid_acc" : fewAcc}, step=self.counter)
                self.counter += 1

            # Stop extra_step steps after the first improvement, or at the end.
            if (step == optim_step + self.extra_step) or ((step+1)==self.meta_step):
                break

            model.load_state_dict(initStateDicts)
            u_grads = self.ComputeGrads4Weights(model, batch, self.few_shot_data, self.few_shot_data_list)
            # Chain rule through the sigmoid, then L2-normalized descent step.
            w_grads = u_grads*u*(1-u)
            weightGrads = -1*w_grads / (w_grads.norm(2) + 1e-8)
            update = self.weight_eta * weightGrads
            weight = weight - update

        # Evaluate the final weights once more in case the last update helped.
        u = weight.sigmoid()
        fewLoss, fewAcc = self.MetaValidation(model, optim, batch, u, few_data, few_data_list)
        if fewLoss < minMetaLoss:
            minMetaLoss = fewLoss
            weight_best = weight.clone()
        model.load_state_dict(initStateDicts)
        self.minMetaLoss.append(minMetaLoss)
        if len(self.minMetaLoss) > 10:
            self.minMetaLoss.pop(0)
        return step+1, weight_best

    def Training(self, model:VanillaBert, unlabeled_target:NLIDataset, valid_set:NLIDataset, UT_Label,
                 labeled_source:NLIDataset=None, labeled_target:NLIDataset=None,
                 max_epoch=100, max_valid_every=100, few_update_every=10,
                 model_file="./tmp.pkl"):
        """Self-training loop: annotate the target set, then alternate meta
        weight updates and model updates over mixed batches.

        Fixed: (1) the fallback branch evaluated the global `model1` instead of
        the `model` argument; (2) it never cleared gradients before backward,
        so gradients accumulated across fallback steps.
        """
        self.initTrainingEnv()
        meta_optim = torch.optim.SGD(model.parameters(), lr=self.lr4model)
        model_optim = self.obtainOptim(model, self.lr4model)
        self.few_shot_data, self.few_shot_data_list = self.FewShotDataList(valid_set)

        # One weight per example, laid out as [unlabeled | source | target];
        # labeled-target examples start with a strong prior weight of 10.
        if not hasattr(self, "raw_weights"):
            weights = [0.0]*len(unlabeled_target) + \
                        ([] if labeled_source is None else [0.0]*len(labeled_source)) + \
                            ([] if labeled_target is None else [10.0]*len(labeled_target))
            self.raw_weights = torch.tensor(weights, device=self.device)

        step = 0
        self.annotate(model, unlabeled_target)
        for epoch in range(max_epoch):
            for batch, indices, e_batch in DataIter(unlabeled_target, labeled_source=labeled_source,
                                                 labeled_target=labeled_target, batch_size=self.batch_size):
                # Sample a fresh few-shot meta-validation batch every step.
                f_batch = valid_set.collate_raw_batch(
                    [valid_set[kk] for kk in random.sample(range(len(valid_set)), self.max_few_shot_size)]
                )
                self.Batch2ExpandData(f_batch, e_batch)
                weights = self.raw_weights[indices].clone()
                meta_step_count, new_weights = self.MetaStep(model, meta_optim, batch, weights)
                if new_weights is not None:
                    self.raw_weights[indices] = new_weights
                    self.OptimStep(model, model_optim, batch, new_weights)
                else:
                    # No improving weights found: plain supervised step on the batch.
                    model_optim.zero_grad()
                    loss, acc = self.lossAndAcc(model, batch)
                    loss.backward()
                    model_optim.step()

                if (step+1) % max_valid_every == 0:
                    self.valid(model, unlabeled_target, UT_Label, self.suffix, step)
                else:
                    print(f"step = {step}")
                step += 1

if __name__ == "__main__":
    import os

    # Load the pickled experiment configuration produced upstream.
    with open("../../args.pkl", 'rb') as fr:
        argConfig = pickle.load(fr)
    # Fixed: str.rstrip(".py") strips ANY trailing '.', 'p' or 'y' characters
    # (e.g. "copy.py" -> "co"); splitext removes exactly the extension.
    argConfig.model_dir = os.path.splitext(str(__file__))[0]
    # Set all the seeds
    seed = argConfig.seed
    reconfig_args(argConfig)

    # See if CUDA available
    device = torch.device("cpu") if not torch.cuda.is_available() else torch.device("cuda:0")
    model1_path = "../../saved/modelSNLI_1.pth"

    domainID = 2
    fewShotCnt = 100
    NLIDomainList = list(NLI_domain_map.keys())
    newDomainName = NLIDomainList[domainID-1]

    model1, tokenizer = obtain_model(args=argConfig, model_device=device)
    labeledSource, validTarget, unlabeledTarget, labeledTarget = obtain_CRST_set(newDomainName,
                                                                                   tokenizer_M=tokenizer,
                                                                                   lt_count=0)
    model1.load_state_dict(torch.load(model1_path))
    trainer = MetaSelfTrainV11(seed, alpha=0.1, topk=0.2, class_num=3, log_dir=argConfig.model_dir,
                                 suffix="{}_FS{}".format(newDomainName, fewShotCnt),
                                 weight_eta=1.0, lr4model=5e-5, coeff4expandset=0.1, max_few_shot_size=20,
                                 Inner_BatchSize=32, meta_step=10, extra_step=5)
    # Alternate which half of the (shuffled) validation set serves as the meta set.
    v_idxs = validTarget.read_indexs.copy()
    np.random.shuffle(v_idxs)
    ULabel = unlabeledTarget.labelTensor()
    ULabel = ULabel.argmax(dim=1) if ULabel.dim() == 2 else ULabel.clone()
    for _ in range(40):
        validTarget.read_indexs = v_idxs[:len(v_idxs)//2]
        trainer.Training(model1, unlabeledTarget, validTarget, ULabel,
                         labeledSource, labeledTarget, max_epoch=4, max_valid_every=30,
                         model_file=f"{argConfig.model_dir}/MetaSelfTrain_{newDomainName}.pkl")
        print("= = = = alternate model = = = => ")
        validTarget.read_indexs = v_idxs[len(v_idxs) // 2:]
        trainer.Training(model1, unlabeledTarget, validTarget, ULabel,
                         labeledSource, labeledTarget, max_epoch=4, max_valid_every=30,
                         model_file=f"{argConfig.model_dir}/MetaSelfTrain_{newDomainName}.pkl")