import sys
sys.path.append("../../")
import fitlog
from transformers import DistilBertConfig
from transformers import DistilBertTokenizer
from transformers import BertTokenizer, BertConfig
from datareader import *
from metrics import *
from model import *
import pickle
from tqdm import trange
import torch

class CRST_NLI(Dataset):
    """
    NLI dataset for class-regularized self-training (CRST).

    Loads sentence pairs from a jsonl file and keeps per-example one-hot
    soft labels, confidences and entropies that the self-training loop can
    overwrite with pseudo-labels. `read_indexs` maps the externally visible
    item indices onto rows of the underlying arrays so that `domainSelect`
    can restrict the dataset to a single domain without copying data.
    """
    def __init__(
            self,
            dataset_path: AnyStr = None,
            domains: List = None,
            tokenizer: PreTrainedTokenizer = None,
            domain_ids: List = None,
            max_data_size: int = -1,
            batchsize: int = 20,
            class_num: int = 3):
        """
        :param dataset_path: path to the jsonl dataset file
        :param domains: domains whose default IDs should be overridden
        :param tokenizer: the tokenizer used in __getitem__
        :param domain_ids: a list of ids to override the default domain IDs
        :param max_data_size: if > 0, randomly subsample this many examples
        :param batchsize: default batch size stored on the dataset
        :param class_num: number of target classes (NLI: 3)
        """
        super(CRST_NLI, self).__init__()
        self.tokenizer = tokenizer
        dataset = self.obtainDataset(dataset_path, data_size=max_data_size)
        if domain_ids is not None and domains is not None:
            for i in range(len(domain_ids)):
                # Use .loc so the override writes into the frame itself; the
                # previous chained indexing (dataset[mask][2] = ...) assigned
                # into a temporary copy and had no effect.
                dataset.loc[dataset['domain'] == domain_map[domains[i]], 'domain'] = domain_ids[i]
        self.premise = dataset['sent1'].values
        self.hypothesis = dataset['sent2'].values

        label_vals = dataset['label'].values
        # One-hot encode the integer labels: shape (N, class_num).
        logits = [label_vals == y_id for y_id in range(class_num)]
        # np.float was removed in NumPy 1.24; use the explicit dtype.
        self.__label = np.stack(logits).T.astype(np.float64)
        self.__confidence = self.__label.copy()
        self.__entrophy = torch.zeros(len(self.__label))

        self.class_num = class_num
        self.batch_size = batchsize
        self.domain = dataset['domain'].values
        # Identity mapping until domainSelect narrows the view.
        self.read_indexs = np.arange(len(self.domain))
        device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
        self.collate_raw_batch = collate_SNLI_batch_with_device(device)

    @property
    def label(self):
        """Soft labels of the currently selected examples."""
        return self.__label[self.read_indexs]

    def setLabel(self, label, idxs):
        """Overwrite the soft labels at the (view-relative) indices `idxs`."""
        indexs = self.read_indexs[idxs]
        self.__label[indexs] = label

    @property
    def confidence(self):
        """Model confidences of the currently selected examples."""
        return self.__confidence[self.read_indexs]

    def setConfidence(self, confidence, idxs):
        """Overwrite the confidences at the (view-relative) indices `idxs`."""
        indexs = self.read_indexs[idxs]
        self.__confidence[indexs] = confidence

    @property
    def entrophy(self):
        """Prediction entropies of the currently selected examples."""
        return self.__entrophy[self.read_indexs]

    def setEntrophy(self, entrophy, idxs):
        """Overwrite the entropies at the (view-relative) indices `idxs`."""
        indexs = self.read_indexs[idxs]
        self.__entrophy[indexs] = entrophy

    def labelTensor(self):
        """Return the selected soft labels as an int64 tensor."""
        return torch.tensor(self.__label[self.read_indexs], dtype=torch.int64)

    def __len__(self):
        return len(self.read_indexs)

    def domainSelect(self, domain_id):
        """Restrict the dataset view to examples of the given domain id."""
        if not domain_id in self.domain:
            print(f"Select domain failed, due to the domain_id {domain_id} is missed in data domains")
        read_indexs = np.arange(len(self.domain))
        self.read_indexs = read_indexs[self.domain == domain_id]

    def obtainDataset(self, dataset_path, data_size=-1):
        """Read the jsonl file into a DataFrame, optionally subsampling it."""
        data = read_nli(dataset_path)
        if data_size > 0:
            return pd.DataFrame(random.sample(data, data_size))
        return pd.DataFrame(data)

    def __getitem__(self, item) -> Tuple:
        idx = self.read_indexs[item]
        input_ids, mask, seg_ids = text_to_batch_transformer([self.premise[idx]],
                                                              self.tokenizer,
                                                              text_pair=[self.hypothesis[idx]])
        return input_ids, mask, seg_ids, self.__label[idx], \
                        self.domain[idx], item

class SelfTrainingUtils:
    """Shared inference, loss and augmentation helpers for self-training."""

    def __init__(self, random_seed):
        self.seed = random_seed

    def acc_P_R_F1(self, y_true, y_pred):
        """Return (accuracy, (precision, recall, f1, support)) for predictions."""
        return accuracy_score(y_true, y_pred.cpu()), \
                    precision_recall_fscore_support(y_true, y_pred.cpu())

    def dataIter(self, pseudo_set, labeled_target=None, batch_size=32):
        """Yield collated batches drawn jointly from the pseudo-labeled set and
        the (optional) labeled target set, in a random order.

        The shuffled index list is doubled so the final batch can wrap around
        and always contains `batch_size` items.
        """
        p_idxs = list(range(len(pseudo_set))) if not hasattr(pseudo_set, 'valid_indexs') else pseudo_set.valid_indexs
        p_len = len(p_idxs)
        if labeled_target is None:
            l_len = 0
            l_idxs = []
        else:
            l_idxs = list(range(len(labeled_target))) if not hasattr(labeled_target, 'valid_indexs') \
                                                        else labeled_target.valid_indexs
            l_len = len(l_idxs)
        data_size = p_len + l_len
        idxs = random.sample(range(data_size), data_size)*2
        for start_i in range(0, data_size, batch_size):
            batch_idxs = idxs[(start_i):(start_i+batch_size)]
            # Indices below p_len address the pseudo set; the rest address
            # the labeled target set (shifted by p_len).
            items = [pseudo_set[p_idxs[idx]] if idx < p_len else \
                        labeled_target[l_idxs[idx-p_len]] for idx in batch_idxs]
            yield pseudo_set.collate_raw_batch(items)

    def Batch2Vecs(self, model: VanillaBert, batch):
        """Run the BERT encoder on a batch and return its pooled output."""
        if batch[0].device != model.device:  # data is on a different device
            input_ids, masks, seg_ids = batch[0].to(model.device), \
                                        batch[1].to(model.device), \
                                        batch[2].to(model.device)
        else:
            input_ids, masks, seg_ids = batch[0], batch[1], batch[2]
        encoder_dict = model.bert.bert.forward(
            input_ids=input_ids,
            attention_mask=masks,
            token_type_ids=seg_ids
        )
        return encoder_dict.pooler_output

    def _applyRandomAug(self, model: VanillaBert, batch):
        """Randomly choose an embedding-level augmentation and arm it on the model.

        The "adver" branch first runs a clean forward/backward pass so the
        embedding gradients needed for the adversarial perturbation exist.
        """
        rand = random.random()
        if rand < 0.2:
            model.bert.bert.embeddings.aug_type = "gaussian"
        elif rand < 0.4:
            model.bert.bert.embeddings.aug_type = "g_blur"
        elif rand < 0.6:
            model.bert.bert.embeddings.aug_type = None
            loss, acc = model.lossAndAcc(batch)
            loss.backward()
            model.bert.bert.embeddings.aug_type = "adver"
        elif rand < 0.8:
            model.bert.bert.embeddings.aug_type = "rMask"
        else:
            model.bert.bert.embeddings.aug_type = "rReplace"

    def AugBatch2Vecs(self, model:VanillaBert, batch):
        """Encode a batch with a randomly chosen embedding augmentation armed."""
        self._applyRandomAug(model, batch)
        return self.Batch2Vecs(model, batch)

    def lossAndAcc(self, model:VanillaBert, batch, temperature=1.0):
        """Compute NLL loss and accuracy for one batch via the model classifier."""
        pooledOutput = self.Batch2Vecs(model, batch)
        logits = model.bert.classifier(pooledOutput)
        preds = F.softmax(logits / temperature, dim=1)
        epsilon = torch.ones_like(preds) * 1e-8
        preds = (preds - epsilon).abs()  # to avoid the prediction [1.0, 0.0], which leads to the 'nan' value in log operation
        labels = batch[-2].to(preds.device)
        # Accept either one-hot (2-D) or index (1-D) labels.
        labels = labels.argmax(dim=1) if labels.dim() == 2 else labels
        loss = F.nll_loss(preds.log(), labels)
        acc = accuracy_score(labels.cpu().numpy(), preds.argmax(dim=1).cpu().numpy())
        return loss, acc

    def dataset_logits(self, model:VanillaBert, data, idxs=None, batch_size=40):
        """Predict class probabilities for (a subset of) the dataset; returns (N, C)."""
        preds = []
        if idxs is None:
            idxs = list(range(len(data)))
        for i in trange(0, len(idxs), batch_size):
            batch_idxs = idxs[i:min(len(idxs), i + batch_size)]
            batch = data.collate_raw_batch([data[idx] for idx in batch_idxs])
            pred = model.predict(batch, temperature=1.0)
            preds.append(pred)
        pred_tensor = torch.cat(preds)
        return pred_tensor

    def dataset_inference(self, model:VanillaBert, data, idxs=None, batch_size=20):
        """Return (argmax class, max probability) per example."""
        pred_tensor = self.dataset_logits(model, data, idxs, batch_size)
        vals, idxs = pred_tensor.sort(dim=1)
        return idxs[:, -1], vals[:, -1]

    def augPredict(self, model, batch):
        """Predict on a batch with a randomly chosen embedding augmentation armed."""
        self._applyRandomAug(model, batch)
        preds = model.predict(batch)
        return preds

    def perf(self, model:VanillaBert, data, label, idxs=None, batch_size=20):
        """Evaluate the model on `data` against `label`; returns acc_P_R_F1 output."""
        with torch.no_grad():
            y_pred, _ = self.dataset_inference(model, data, idxs=idxs, batch_size=batch_size)
        label = label[idxs] if idxs is not None else label
        y_true = label if label.dim() == 1 else label.argmax(dim=1)
        return self.acc_P_R_F1(y_true, y_pred)

class SelfTrainingBase(SelfTrainingUtils):
    """Base self-training loop: annotate -> select -> retrain, with fitlog logging."""

    def __init__(self, random_seed, log_dir, suffix, model_file, class_num, learning_rate):
        """
        :param random_seed: seed used by initTrainingEnv
        :param log_dir: directory for fitlog output and temporary checkpoints
        :param suffix: tag appended to metric names and checkpoint files
        :param model_file: target path for the final model
        :param class_num: number of target classes
        :param learning_rate: base learning rate for layer-wise fine-tuning
        """
        super(SelfTrainingBase, self).__init__(random_seed)
        # os.makedirs is race-free and creates parents, unlike shelling out to mkdir.
        os.makedirs(log_dir, exist_ok=True)
        fitlog.set_log_dir("{}/".format(log_dir), new_log=True)
        self.log_dir = log_dir
        self.suffix = suffix
        self.model_file = model_file
        self.best_valid_acc = 0.0
        self.min_valid_loss = 1e8
        self.class_num = class_num
        self.valid_step = 0
        self.learning_rate = learning_rate

    def obtainOptim(self, model):
        """Build an Adam optimizer with layer-wise learning-rate decay (factor 0.8
        per layer below the top, embeddings lowest)."""
        optimizerGroupedParameters = [
                                         {'params': p,
                                          'lr': self.learning_rate * pow(0.8, 12 - int(
                                              n.split("layer.")[1].split(".", 1)[0])) if "layer." in n \
                                              else (self.learning_rate * pow(0.8, 13) if "embedding" in n else self.learning_rate)
                                          # layer-wise fine-tuning
                                          } for n, p in model.named_parameters()
                                     ]
        optim = torch.optim.Adam(optimizerGroupedParameters)
        return optim

    def _metricItems(self, acc_v, p_v, r_v, f1_v):
        """Flatten accuracy plus per-class P/R/F1 into (name, value) pairs for fitlog."""
        return [("valid_acc", acc_v)] + \
               [('valid_prec_{}'.format(i), p_v[i]) for i in range(self.class_num)] + \
               [('valid_recall_{}'.format(i), r_v[i]) for i in range(self.class_num)] + \
               [('valid_f1_{}'.format(i), f1_v[i]) for i in range(self.class_num)]

    def annotate(self, model: VanillaBert, data:CRST_NLI, pseaudo_idxs=None, batch_size=20):
        """Hard-annotate every example not already pseudo-labeled.

        :param pseaudo_idxs: indices to skip (already pseudo-labeled); None means none.
        """
        # None sentinel instead of a shared mutable default list.
        pseaudo_idxs = [] if pseaudo_idxs is None else pseaudo_idxs
        c_idxs = list(set(range(len(data))) - set(pseaudo_idxs))
        with torch.no_grad():
            pred_tensor = self.dataset_logits(model, data, idxs=c_idxs, batch_size=batch_size)
            weak_label = (pred_tensor > 0.5).long().tolist()
            data.setLabel(weak_label, c_idxs)

    def dataSelection(self, pseudo_set):
        """Hook for subclasses: restrict pseudo_set to confident examples."""
        assert hasattr(pseudo_set, "logits")

    def modelTrain(self, max_epoch, tr_model, train_set, valid_set, extra_train=None,
                   best_valid_acc= -1.0, batch_size=32, threshold=0.2):
        """Train for up to max_epoch epochs, checkpointing on best validation
        accuracy and early-stopping when the mean epoch loss drops below threshold.

        :return: the best validation accuracy observed
        """
        valid_acc = best_valid_acc if best_valid_acc != -1 else self.best_valid_acc
        loss_list = []
        tmp_model_file = "{}/{}_tmp.pkl".format(self.log_dir, self.suffix)
        optim = self.obtainOptim(tr_model)
        for epoch in range(max_epoch):
            for step, batch in enumerate(self.dataIter(train_set, extra_train, batch_size=batch_size)):
                loss, acc = self.lossAndAcc(tr_model, batch)
                optim.zero_grad()
                loss.backward()
                optim.step()
                torch.cuda.empty_cache()
                print('####Model Update#### step={} ({} | {}) ####, loss = {}'.format(
                    step, epoch, max_epoch, loss.data.item()
                ))
                loss_list.append(loss.data.item())
            mean_loss = np.mean(loss_list)
            loss_list = []
            print("========> mean loss:", mean_loss)

            rst = self.perf(tr_model, valid_set, valid_set.labelTensor())
            acc_v, (p_v, r_v, f1_v, _) = rst
            output_items = self._metricItems(acc_v, p_v, r_v, f1_v)
            print("valid perf:", rst)
            fitlog.add_metric({f"ValidPerf_{self.suffix}": dict(output_items)}, step=self.valid_step)
            self.valid_step += 1

            if acc_v > valid_acc:
                valid_acc = acc_v
                torch.save(tr_model.state_dict(), tmp_model_file)
            if mean_loss < threshold:  # early stop
                break
        if os.path.exists(tmp_model_file):
            # Restore the best checkpoint and clean it up (os.remove, not a shell call).
            tr_model.load_state_dict(torch.load(tmp_model_file))
            os.remove(tmp_model_file)
        return valid_acc

    def initTrainingEnv(self, model, labeled_source, labeled_target, unlabeled_set, U_label, isWeightInited=False):
        """Optionally pre-train on the source domain, log the original target
        performance, then reset all RNG seeds and tracking state."""
        if not isWeightInited:
            source_val, source_train = labeled_source.split([100./len(labeled_source), 1.0])
            self.modelTrain(10, model, source_train, source_val, best_valid_acc=0.0)
            rst_model = self.perf(model, unlabeled_set, U_label)
            acc_v, (p_v, r_v, f1_v, _) = rst_model
            print(f"Original Performance of {self.suffix}:", rst_model)
            output_items = self._metricItems(acc_v, p_v, r_v, f1_v)
            fitlog.add_best_metric({f"Original_{self.suffix}": dict(output_items)})
        random.seed(self.seed)
        np.random.seed(self.seed)
        torch.manual_seed(self.seed)
        torch.cuda.manual_seed_all(self.seed)
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
        self.best_valid_acc = 0.0
        self.min_valid_loss = 1e8

    def trainning(self, model, labeled_source, labeled_target, unlabeled_target, UT_label, valid_set,
                  max_iterate=100, isWeightInited=False):
        """Run the self-training loop: annotate, select, retrain, log performance."""
        self.initTrainingEnv(model, labeled_source, labeled_target, unlabeled_target, UT_label, isWeightInited)
        for iterate in range(max_iterate):
            self.annotate(model, unlabeled_target)
            self.dataSelection(unlabeled_target)
            self.modelTrain(1, model, unlabeled_target, valid_set, extra_train=labeled_target)
            self.logPerf(model, unlabeled_target, UT_label, self.suffix)

    def logPerf(self, model, test_set, test_label, test_suffix, step=0):
        """Evaluate on the test set and record the metrics as fitlog best metrics."""
        rst_model = self.perf(model, test_set, test_label)
        acc_v, (p_v, r_v, f1_v, _) = rst_model
        print("step = {} : ".format(step), rst_model)
        output_items = self._metricItems(acc_v, p_v, r_v, f1_v)
        fitlog.add_best_metric({f"BestPerf_{self.suffix}": dict(output_items)})

class CRST_LRENT(SelfTrainingBase):
    """CRST with label-regularized entropy: soft pseudo-labels sharpened against
    a class-wise confidence threshold, plus top-K confident data selection."""

    def __init__(self, random_seed, log_dir, suffix, model_file, class_num,
                    learning_rate=5e-5, alpha=0.1, topK=0.2):
        super(CRST_LRENT, self).__init__(random_seed, log_dir, suffix, model_file, class_num, learning_rate)
        self.alpha = alpha  # sharpening temperature for pseudo-labels
        self.topK = topK    # fraction of most-confident predictions kept

    def annotate(self, model: VanillaBert, data:CRST_NLI, pseaudo_idxs=None, batch_size=20):
        """Soft-annotate every example not in pseaudo_idxs.

        Predictions are normalized by the per-class top-K threshold lambda_k
        and sharpened with exponent 1/alpha before renormalization.
        """
        # None sentinel instead of a shared mutable default list.
        pseaudo_idxs = [] if pseaudo_idxs is None else pseaudo_idxs
        c_idxs = list(set(range(len(data))) - set(pseaudo_idxs))
        with torch.no_grad():
            pred_tensor = self.dataset_logits(model, data, idxs=c_idxs, batch_size=batch_size)
            if not hasattr(data, "logits"):
                data.logits = torch.zeros([len(data), self.class_num], device=pred_tensor.device)
            data.logits[c_idxs] = pred_tensor
        # lambda_k holds, per class, the K-th largest predicted probability.
        topk_vals, _ = pred_tensor.topk(int(self.topK*len(pred_tensor)), dim=0)
        self.lambda_k = topk_vals[-1]
        pseudo_label = (pred_tensor/self.lambda_k).pow(1.0/self.alpha)
        pseudo_label = (pseudo_label/(pseudo_label.sum(dim=1).unsqueeze(-1))).tolist()
        data.setLabel(pseudo_label, c_idxs)

    def dataSelection(self, pseudo_set):
        """Keep only the top-K most confident examples per class for training."""
        assert hasattr(pseudo_set, "logits")
        _, indexs = pseudo_set.logits.topk(int(len(pseudo_set)*self.topK), dim=0)
        pseudo_set.valid_indexs = indexs.reshape([-1]).tolist()

def obtain_model(args, model_device):
    """Build the (Distil)BERT sequence classifier and matching tokenizer.

    :param args: namespace with full_bert, bertPath and distillBertPath
    :param model_device: device to place the model on
    :return: (VanillaBert model, tokenizer)
    """
    if args.full_bert:
        # Fall back to the hub checkpoint when no local path is configured.
        source = 'bert-base-uncased' if args.bertPath is None else args.bertPath
        bert_config = BertConfig.from_pretrained(source, num_labels=2)
        tokenizer_M = BertTokenizer.from_pretrained(source)
    else:
        source = 'distilbert-base-uncased' if args.distillBertPath is None else args.distillBertPath
        bert_config = DistilBertConfig.from_pretrained(source, num_labels=2)
        tokenizer_M = DistilBertTokenizer.from_pretrained(source)
    # NLI has three classes; also switch the classification head to relu.
    bert_config.num_labels = 3
    bert_config.hidden_act = "relu"
    # Create the model
    if args.full_bert:
        bert = BertForSequenceClassification.from_pretrained(
                        source, config=bert_config).to(model_device)
    else:
        bert = DistilBertForSequenceClassification.from_pretrained(
                        source, config=bert_config).to(model_device)
    model = VanillaBert(bert).to(model_device)
    return model, tokenizer_M

def obtain_domain_set(new_domain_name, tokenizer_M, lt_count=0):
    """Load the SNLI source set plus validation/test/labeled sets for a MultiNLI domain.

    :param new_domain_name: target MultiNLI domain name
    :param tokenizer_M: tokenizer shared by all datasets
    :param lt_count: number of labeled target examples to split off; 0 means none
    :return: (SNLI_set, val_set, test_set, labeled_target) — labeled_target is
        None when lt_count == 0
    """
    SNLI_set = CRST_NLI("../../../snli_1.0/snli_1.0_train.jsonl", tokenizer=tokenizer_M, class_num=3)
    testFile = f"../../../multinli_1.0/Domain_{new_domain_name}.jsonl"
    if lt_count != 0:
        filenames = SplitDataFile(testFile, accumulation=[lt_count, -1])
        labeled_target = CRST_NLI(filenames[0], tokenizer=tokenizer_M, class_num=3)
        test_set = CRST_NLI(filenames[1], tokenizer=tokenizer_M, class_num=3)
    else:
        labeled_target = None
        # class_num=3 made explicit for consistency with the sibling calls.
        test_set = CRST_NLI(testFile, tokenizer=tokenizer_M, class_num=3)
    val_set = CRST_NLI(f"../../../multinli_1.0/fewshot_Domain_{new_domain_name}.jsonl",
                              tokenizer=tokenizer_M, max_data_size=100, class_num=3)
    return SNLI_set, val_set, test_set, labeled_target

def SplitDataFile(fname, accumulation=None):
    """Shuffle the lines of *fname* and split them into consecutive sub-files.

    :param fname: path of the source file
    :param accumulation: cumulative end indices of the splits; the last entry is
        always replaced by the total line count. Defaults to [0, -1] (an empty
        first split and everything else in the second).
    :return: list of the written sub-file paths (``<stem>_<i>.<ext>``)
    """
    # None sentinel avoids the shared mutable default; the copy ensures the
    # caller's list is never mutated (the original wrote into it in place).
    accumulation = [0, -1] if accumulation is None else list(accumulation)
    with open(fname, 'r') as fr:
        lines = [line for line in fr]
    # random.sample over the full length is an out-of-place shuffle.
    lines = random.sample(lines, len(lines))

    accumulation[-1] = len(lines)
    stem, ext = fname.rsplit(".", 1)
    start = 0
    out_file_list = []
    for i, end in enumerate(accumulation):
        sub_file = f"{stem}_{i}.{ext}"
        with open(sub_file, 'w') as fw:
            fw.write("".join(lines[start:end]))
        out_file_list.append(sub_file)
        start = end
    return out_file_list

def reconfig_args(args):
    """Force the experiment onto the local full-BERT checkpoint.

    Mutates *args* in place (full_bert=True, bertPath set) and returns it.
    The duplicated `args.full_bert = True` assignment was removed.
    """
    args.full_bert = True
    args.bertPath = "../../../bert_en/"

    print("====>", args.full_bert)
    return args

if __name__ == "__main__":
    # NOTE(review): pickle.load on a shared args file — acceptable only because
    # the file is produced by this project; never unpickle untrusted data.
    with open("../../args.pkl", 'rb') as fr:
        argConfig = pickle.load(fr)
    # str.rstrip(".py") strips any trailing '.', 'p', 'y' characters (e.g.
    # "copy.py" -> "co"), so cut the extension explicitly instead.
    script_name = str(__file__)
    argConfig.model_dir = script_name[:-3] if script_name.endswith(".py") else script_name
    # Set all the seeds
    seed = argConfig.seed
    reconfig_args(argConfig)

    # See if CUDA available
    device = torch.device("cpu") if not torch.cuda.is_available() else torch.device("cuda:0")
    model1_path = "../../saved/modelSNLI_1.pth"

    domainID = 2
    fewShotCnt = 100
    NLIDomainList = list(NLI_domain_map.keys())
    newDomainName = NLIDomainList[domainID-1]

    model1, tokenizer = obtain_model(args=argConfig, model_device=device)
    labeledSource, validTarget, unlabeledTarget, labeledTarget = obtain_domain_set(newDomainName,
                                                                                   tokenizer_M=tokenizer,
                                                                                   lt_count=0)
    model1.load_state_dict(torch.load(model1_path))
    trainer = CRST_LRENT(seed, argConfig.model_dir,
                         "{}_FS{}".format(newDomainName, fewShotCnt),
                         "{}/model_{}.pth".format(argConfig.model_dir, newDomainName),
                         class_num=3, learning_rate=2e-5, alpha=0.1, topK=0.4)
    trainer.trainning(model1, labeledSource, labeledTarget, unlabeledTarget,
                      unlabeledTarget.labelTensor().clone(), validTarget,
                      max_iterate=100, isWeightInited=True)