import sys
sys.path.append("../../")
from datareader import NLI_domain_map
from MetaSelfTrainFramewoks import MetaSelfTrainingV5
import pickle, random, numpy as np
import torch
from model import VanillaBert
from datareader import NLIDataset
from NLI_Utils import reconfig_args, obtain_domain_set, obtain_model

class MetaSelfTrainingV7(MetaSelfTrainingV5):
    """Meta self-training variant V7.

    Adds on top of MetaSelfTrainingV5:
      * a fully seeded, deterministic training environment (``initTrainingEnv``);
      * entropy-based selection of the most confident predictions on the
        unlabeled target set, which are folded into a meta "expand" set
        (``entrophy_select`` + ``ConstructExpandData``).

    Helpers such as ``MetaStep``, ``OptimStep``, ``lossAndAcc``, ``DataIter``,
    ``annotate``, ``valid`` and ``FewShotDataList`` are inherited from the V5
    base class and are not re-defined here.
    """

    def __init__(self, seed, class_num, log_dir, suffix, weight_eta=0.1, lr4model=2e-2,
                 coeff4expandset=1.0, max_few_shot_size=20, Inner_BatchSize=5, meta_step=5, extra_step=5):
        """Store the RNG seed in addition to the V5 initialisation.

        Parameters mirror ``MetaSelfTrainingV5.__init__``; ``seed`` is new and
        is applied by ``initTrainingEnv()`` at the start of every ``Training()``
        call to make each self-training loop reproducible.
        """
        super(MetaSelfTrainingV7, self).__init__(class_num, log_dir, suffix, weight_eta, lr4model, coeff4expandset,
                                                 max_few_shot_size, Inner_BatchSize, meta_step, extra_step)
        self.seed = seed

    def ConstructExpandData(self, weak_set, expand_idxs):
        """Build collated mini-batches of "expand" data from ``weak_set`` and
        remove those indices from the set's valid pool.

        Side effects:
          * ``self.e_label_weight``   - per-class weight tensor (see NOTE below)
          * ``self.expand_data_list`` - collated batches of size
                                        <= ``self.max_few_shot_size``
          * ``weak_set.valid_indexs`` - indices still usable for training
          * ``self.minMetaLoss``      - reset, since the meta/valid pool changed
        """
        # NOTE(review): every class gets weight 1/len(expand_idxs) (not
        # 1/class_num) — presumably to normalise the summed loss by the number
        # of expand examples; confirm against lossAndAcc's reduction="sum".
        self.e_label_weight = torch.tensor([1.0 / len(expand_idxs) for _ in range(self.class_num)], device=self.device)
        # extract out the expand data
        print("- - - - - update expand data - - ->")
        # Python slicing already clamps at the end of the sequence, so no
        # explicit min(i + size, len) is needed here.
        self.expand_data_list = [
            weak_set.collate_raw_batch([weak_set[idx] for idx in expand_idxs[i:i + self.max_few_shot_size]])
            for i in range(0, len(expand_idxs), self.max_few_shot_size)
        ]

        # Exclude the expand indices from the valid pool. A set gives O(1)
        # membership tests instead of the original O(n) list scans.
        expand_set = set(expand_idxs)
        if not hasattr(weak_set, "valid_indexs"):
            weak_set.valid_indexs = [idx for idx in range(len(weak_set)) if idx not in expand_set]
        else:
            weak_set.valid_indexs = [idx for idx in weak_set.valid_indexs if idx not in expand_set]
        self.minMetaLoss = 1e8  # the valid set has changed, so reset the global minMetaLoss

    def meanGradOnValSet(self, model, few_shot_data=None, few_shot_data_list=None):
        """Accumulate gradients of the meta objective over the few-shot data
        (and, when present, the expand set) into ``model``'s parameters.

        Exactly one of ``few_shot_data`` / ``few_shot_data_list`` must be given.

        Returns:
            (grad_dicts, loss, acc): ``grad_dicts`` maps parameter name to a
            cloned gradient tensor; ``loss`` is the summed scalar loss
            (including the weighted expand-set loss when available); ``acc`` is
            the accuracy on the few-shot data as reported by ``lossAndAcc``.
        """
        model.zero_grad()
        assert few_shot_data is not None or few_shot_data_list is not None
        if few_shot_data_list is None:
            loss, acc = self.lossAndAcc(model, few_shot_data, label_weight=self.f_label_weight, reduction="sum")
            loss.backward()
            loss = loss.data.item()
        else:
            print("-------> few shot data list ------>")
            f_loss_list, f_acc_list = [], []
            for i, few_data in enumerate(few_shot_data_list):
                # sum the loss with the averaged weights or the biased weights
                # will allow computing the mean risk on any size of few shot dataset
                f_loss, f_acc = self.lossAndAcc(model, few_data, label_weight=self.f_label_weight, reduction="sum")
                f_loss.backward()
                f_loss_list.append(f_loss.data.item())
                f_acc_list.append(f_acc)
                torch.cuda.empty_cache()
            loss, acc = np.sum(f_loss_list), np.mean(f_acc_list)

        # None and [] both skip the expand pass (original nested ifs merged).
        if self.expand_data_list:
            assert hasattr(self, 'e_label_weight')
            print("-------> expand data list ------>")
            e_loss_list, e_acc_list = [], []
            for i, e_data in enumerate(self.expand_data_list):
                e_loss, e_acc = self.lossAndAcc(model, e_data, label_weight=self.e_label_weight, reduction="sum")
                # NOTE(review): 'coefff4expandset' (triple f) is not assigned
                # anywhere in this file; it must come from the V5 base class.
                # The ctor argument is spelled 'coeff4expandset' — verify this
                # is not a typo for that attribute.
                (e_loss * self.coefff4expandset).backward()
                e_loss_list.append(e_loss.data.item())
                e_acc_list.append(e_acc)
                torch.cuda.empty_cache()
            exp_loss, exp_acc = np.sum(e_loss_list), np.mean(e_acc_list)
            print(f"##Perf on Meta Expand Set##  : exp_loss/exp_acc = {exp_loss}/{exp_acc}")
            grad_dicts = {n: p.grad.clone() for n, p in model.named_parameters()}
            return grad_dicts, \
                   loss + self.coefff4expandset*exp_loss, \
                   acc
        grad_dicts = {n: p.grad.clone() for n, p in model.named_parameters()}
        return grad_dicts, loss, acc

    def entrophy_select(self, unlabeled_target, topk=100):
        """Return the indices of the ``topk`` lowest-entropy (i.e. most
        confident) predictions on the annotated unlabeled set.

        Assumes ``unlabeled_target.logits`` already holds probabilities (rows
        summing to 1) — TODO confirm, since raw logits would make the entropy
        formula meaningless. Probabilities are clamped away from zero before
        the log so that p == 0 cannot produce 0 * -inf = NaN, which would
        poison the argsort (original code had no clamp).

        Args:
            unlabeled_target: dataset carrying a ``logits`` tensor attribute,
                filled by a prior ``self.annotate(...)`` call.
            topk: number of indices to return (default 100, matching the
                previously hard-coded cutoff).
        """
        assert hasattr(unlabeled_target, "logits")
        probs = unlabeled_target.logits
        entropy = (-1 * probs * probs.clamp_min(1e-12).log()).sum(dim=1)
        topK_idxs = entropy.argsort()[:topk].tolist()
        return topK_idxs

    def initTrainingEnv(self):
        """Seed every RNG (python / numpy / torch CPU+CUDA) and force cuDNN
        into deterministic mode, then bump the self-training loop counter.

        Called at the top of every ``Training()`` invocation so repeated loops
        start from an identical random state.
        """
        random.seed(self.seed)
        np.random.seed(self.seed)
        torch.manual_seed(self.seed)
        torch.cuda.manual_seed_all(self.seed)
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
        self.self_train_loop_cnt += 1  # attribute maintained by the base class

    def Training(self, model:VanillaBert, unlabeled_target:NLIDataset, valid_set:NLIDataset, UT_Label,
                 labeled_source:NLIDataset=None, labeled_target:NLIDataset=None,
                 max_epoch=100, max_valid_every=100, model_file="./tmp.pkl"):
        """Run one meta self-training round.

        Annotates the unlabeled target set, selects a confident "expand"
        subset by entropy, then alternates MetaStep re-weighting with ordinary
        optimiser updates, validating every ``max_valid_every`` steps.

        Args:
            model: the classifier being trained (updated in place).
            unlabeled_target: target-domain data without gold labels.
            valid_set: few-shot validation pool for the meta objective.
            UT_Label: gold labels of the unlabeled target set, used only for
                evaluation in ``self.valid``.
            labeled_source / labeled_target: optional extra supervision.
            max_epoch: passes over the unlabeled data.
            max_valid_every: validation period in optimisation steps.
            model_file: checkpoint path (consumed by base-class helpers).
        """
        self.initTrainingEnv()
        meta_optim = torch.optim.SGD(model.parameters(), lr=self.lr4model)
        model_optim = torch.optim.Adam(model.parameters(), lr=self.lr4model)

        self.few_shot_data, self.few_shot_data_list = self.FewShotDataList(valid_set)
        # Initial per-example weights: 0 for unlabeled/source data, 10 for
        # trusted labeled-target data. Built once and reused across rounds.
        if not hasattr(self, "raw_weights"):
            weights = [0.0]*len(unlabeled_target) + \
                        ([] if labeled_source is None else [0.0]*len(labeled_source)) + \
                            ([] if labeled_target is None else [10.0]*len(labeled_target))
            self.raw_weights = torch.tensor(weights, device=self.device)

        if not hasattr(self, "abs_weights"):
            self.abs_weights = self.raw_weights.clone()#.sigmoid()

        step = 0
        self.annotate(model, unlabeled_target)
        expand_idxs_Cur = self.entrophy_select(unlabeled_target)
        self.ConstructExpandData(unlabeled_target, expand_idxs_Cur)
        for epoch in range(max_epoch):
            for batch, indices in self.DataIter(unlabeled_target, labeled_source=labeled_source,
                                                 labeled_target=labeled_target, batch_size=self.batch_size):
                weights = self.abs_weights[indices].clone()
                meta_step_count, new_weights = self.MetaStep(model, meta_optim, batch, weights)
                if new_weights is not None:
                    self.abs_weights[indices] = new_weights
                    self.OptimStep(model, model_optim, batch, new_weights)
                else:
                    # Fallback plain supervised step when MetaStep yields no
                    # re-weighting. Clear stale gradients first: the original
                    # code skipped zero_grad here, so gradients left over from
                    # the preceding MetaStep silently accumulated into this
                    # update.
                    model_optim.zero_grad()
                    loss, acc = self.lossAndAcc(model, batch)
                    loss.backward()
                    model_optim.step()

                if (step+1) % max_valid_every == 0:
                    self.valid(model, unlabeled_target, UT_Label, self.suffix, step)
                else:
                    print(f"step = {step}")
                step += 1

if __name__ == "__main__":
    # Restore the experiment configuration produced by an earlier run.
    with open("../../args.pkl", 'rb') as fr:
        argConfig = pickle.load(fr)
    # Use this script's path minus its ".py" extension as the model dir.
    # BUG FIX: str.rstrip(".py") strips any trailing '.', 'p' or 'y'
    # *characters*, not the suffix (e.g. "copy.py" -> "co"), so test the
    # suffix explicitly and slice it off instead.
    script_path = str(__file__)
    argConfig.model_dir = script_path[:-3] if script_path.endswith(".py") else script_path
    # Set all the seeds
    seed = argConfig.seed
    reconfig_args(argConfig)

    # See if CUDA available
    device = torch.device("cpu") if not torch.cuda.is_available() else torch.device("cuda:0")
    model1_path = "../../saved/modelSNLI_1.pth"

    domainID = 2
    fewShotCnt = 100
    NLIDomainList = list(NLI_domain_map.keys())
    newDomainName = NLIDomainList[domainID-1]

    model1, tokenizer = obtain_model(args=argConfig, model_device=device)
    labeledSource, validTarget, unlabeledTarget, labeledTarget = obtain_domain_set(newDomainName,
                                                                                   tokenizer_M=tokenizer,
                                                                                   lt_count=0)
    model1.load_state_dict(torch.load(model1_path))
    trainer = MetaSelfTrainingV7(seed, class_num=3, log_dir=argConfig.model_dir,
                                 suffix="{}_FS{}".format(newDomainName, fewShotCnt),
                                 weight_eta=0.2, lr4model=5e-5, coeff4expandset=1.0, max_few_shot_size=20,
                                 Inner_BatchSize=32, meta_step=50, extra_step=5)
    # Alternate training over the two halves of a shuffled validation split,
    # so each half periodically serves as the meta/few-shot pool.
    v_idxs = validTarget.read_indexs.copy()
    np.random.shuffle(v_idxs)
    for _ in range(20):
        validTarget.read_indexs = v_idxs[:len(v_idxs)//2]
        trainer.Training(model1, unlabeledTarget, validTarget, unlabeledTarget.labelTensor().clone(),
                         labeledSource, labeledTarget, max_epoch=3, max_valid_every=30,
                         model_file=f"{argConfig.model_dir}/MetaSelfTrain_{newDomainName}.pkl")
        print("= = = = alternate model = = = => ")
        validTarget.read_indexs = v_idxs[len(v_idxs) // 2:]
        trainer.Training(model1, unlabeledTarget, validTarget, unlabeledTarget.labelTensor().clone(),
                         labeledSource, labeledTarget, max_epoch=3, max_valid_every=30,
                         model_file=f"{argConfig.model_dir}/MetaSelfTrain_{newDomainName}.pkl")