from typing import AnyStr, List, Tuple
from transformers import PreTrainedTokenizer
from transformers import DistilBertConfig
from transformers import DistilBertTokenizer
from transformers import BertConfig
from datareader import NLIDatasetV2, NLI_domain_map, collate_SNLI_batch_with_device, text_to_batch_transformer
from model import VanillaBert, BertForSequenceClassification, DistilBertForSequenceClassification
from transformers import BertTokenizer
import random, numpy as np
import torch

class CRST_Dataset(NLIDatasetV2):
    """NLI dataset variant for CRST self-training.

    Extends NLIDatasetV2 with one-hot labels plus per-example confidence
    and entropy buffers used by the self-training loop.
    """

    def __init__(self,
            dataset_path: AnyStr = None,
            domains: List = None,
            tokenizer: PreTrainedTokenizer = None,
            domain_ids: List = None,
            max_data_size: int = -1,
            load_data: bool = True
        ):
        # Initialize the parent with load_data=False so loading is deferred
        # to this class's own load_data() override.
        # FIX: the original called super(NLIDatasetV2, self).__init__, which
        # skips NLIDatasetV2.__init__ entirely while passing it
        # NLIDatasetV2-shaped arguments — almost certainly a typo for
        # super(CRST_Dataset, self).
        super().__init__(dataset_path, domains, tokenizer, domain_ids, max_data_size, False)
        if load_data:
            self.load_data()

    def load_data(self):
        """Read the dataset file and build the arrays/tensors used downstream."""
        dataset = self.obtainDataset(self.dataset_path, data_size=self.max_data_size)
        if self.domain_ids is not None and self.domains is not None:
            # Remap matching rows to the requested domain ids.
            # FIX: the original chained assignment dataset[mask][2] = ... wrote
            # to a temporary copy and had no effect; use .loc on the frame.
            # NOTE(review): the target column label `2` is kept from the
            # original — confirm whether 'domain' was the intended column.
            for i in range(len(self.domain_ids)):
                mask = dataset['domain'] == NLI_domain_map[self.domains[i]]
                dataset.loc[mask, 2] = self.domain_ids[i]
        self.premise = dataset['sent1'].values
        self.hypothesis = dataset['sent2'].values
        # One-hot encode the integer labels: rows are examples, columns classes.
        label = torch.tensor(dataset['label'].values, dtype=torch.int32)
        num_classes = int(label.max().item()) + 1
        one_hot = torch.stack([(label == y_idx).float() for y_idx in range(num_classes)])
        self._label = one_hot.T.cpu().numpy()
        # Self-training bookkeeping: start fully confident with zero entropy.
        self._confidence = torch.ones(len(self._label))
        self._entrophy = torch.zeros(len(self._label))  # (sic) attribute name kept for compatibility
        self.domain = dataset['domain'].values
        self.read_indexs = np.arange(len(self.domain))
        device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
        self.collate_raw_batch = collate_SNLI_batch_with_device(device)

    def __getitem__(self, item) -> Tuple:
        """Tokenize one premise/hypothesis pair on the fly and return it with
        its one-hot label, domain, and the original index."""
        idx = self.read_indexs[item]
        input_ids, mask, seg_ids = text_to_batch_transformer([self.premise[idx]],
                                                              self.tokenizer,
                                                              text_pair=[self.hypothesis[idx]])
        return input_ids, mask, seg_ids, self._label[idx], \
                        self.domain[idx], item

def obtain_domain_set(new_domain_name, tokenizer_M, few_shot_cnt=100, lt_count=0):
    """Build the SNLI source set and the val/test/labeled-target sets for one
    MultiNLI domain.

    Args:
        new_domain_name: target domain name used to locate the data files.
        tokenizer_M: tokenizer shared by every constructed dataset.
        few_shot_cnt: cap on the number of few-shot validation examples.
        lt_count: if non-zero, carve this many labeled target examples out of
            the domain file; the remainder becomes the test set.

    Returns:
        (SNLI_set, val_set, test_set, labeled_target); labeled_target is None
        when lt_count == 0.
    """
    source_set = NLIDatasetV2("../../../snli_1.0/snli_1.0_train.jsonl", tokenizer=tokenizer_M)
    domain_file = f"../../../multinli_1.0/Domain_{new_domain_name}.jsonl"
    labeled_target = None
    if lt_count != 0:
        split_files = SplitDataFile(domain_file, accumulation=[lt_count, -1])
        labeled_target = NLIDatasetV2(split_files[0], tokenizer=tokenizer_M)
        test_set = NLIDatasetV2(split_files[1], tokenizer=tokenizer_M)
    else:
        test_set = NLIDatasetV2(domain_file, tokenizer=tokenizer_M)
    fewshot_file = f"../../../multinli_1.0/fewshot_Domain_{new_domain_name}.jsonl"
    val_set = NLIDatasetV2(fewshot_file, tokenizer=tokenizer_M, max_data_size=few_shot_cnt)
    return source_set, val_set, test_set, labeled_target

def obtain_model(args, model_device):
    """Create the (Distil)BERT sequence classifier and its tokenizer.

    Uses full BERT when args.full_bert is set, otherwise DistilBERT. A local
    checkpoint path (args.bertPath / args.distillBertPath) takes precedence
    over the hub model name when provided.

    Args:
        args: namespace with full_bert, bertPath, distillBertPath.
        model_device: torch device the model is moved to.

    Returns:
        (model, tokenizer_M): VanillaBert-wrapped classifier on model_device,
        and the matching tokenizer.
    """
    if args.full_bert:
        path = args.bertPath if args.bertPath is not None else 'bert-base-uncased'
        config_cls, tokenizer_cls, model_cls = (
            BertConfig, BertTokenizer, BertForSequenceClassification)
    else:
        path = args.distillBertPath if args.distillBertPath is not None else 'distilbert-base-uncased'
        config_cls, tokenizer_cls, model_cls = (
            DistilBertConfig, DistilBertTokenizer, DistilBertForSequenceClassification)
    # FIX: the original requested num_labels=2 and immediately overwrote it
    # with 3; NLI is 3-way, so ask for 3 up front.
    bert_config = config_cls.from_pretrained(path, num_labels=3)
    bert_config.hidden_act = "relu"
    tokenizer_M = tokenizer_cls.from_pretrained(path)
    # Create the model
    bert = model_cls.from_pretrained(path, config=bert_config).to(model_device)
    model = VanillaBert(bert).to(model_device)
    return model, tokenizer_M

def obtain_CRST_set(new_domain_name, tokenizer_M, lt_count=0, few_shot_cnt=100):
    """Build CRST_Dataset versions of the SNLI source and domain-target sets.

    Mirrors obtain_domain_set but constructs CRST_Dataset instances (one-hot
    labels plus self-training buffers).

    Args:
        new_domain_name: target domain name used to locate the data files.
        tokenizer_M: tokenizer shared by every constructed dataset.
        lt_count: if non-zero, carve this many labeled target examples out of
            the domain file; the remainder becomes the test set.
        few_shot_cnt: cap on the few-shot validation examples (was a
            hard-coded 100; parameterized for consistency with
            obtain_domain_set, default unchanged).

    Returns:
        (SNLI_set, val_set, test_set, labeled_target); labeled_target is None
        when lt_count == 0.
    """
    SNLI_set = CRST_Dataset("../../../snli_1.0/snli_1.0_train.jsonl", tokenizer=tokenizer_M)
    testFile = f"../../../multinli_1.0/Domain_{new_domain_name}.jsonl"
    if lt_count != 0:
        filenames = SplitDataFile(testFile, accumulation=[lt_count, -1])
        labeled_target = CRST_Dataset(filenames[0], tokenizer=tokenizer_M)
        test_set = CRST_Dataset(filenames[1], tokenizer=tokenizer_M)
    else:
        labeled_target = None
        test_set = CRST_Dataset(testFile, tokenizer=tokenizer_M)
    val_set = CRST_Dataset(f"../../../multinli_1.0/fewshot_Domain_{new_domain_name}.jsonl",
                              tokenizer=tokenizer_M, max_data_size=few_shot_cnt)
    return SNLI_set, val_set, test_set, labeled_target

def SplitDataFile(fname, accumulation=None):
    """Shuffle the lines of fname and split them into consecutive chunk files.

    Args:
        fname: path to a text file (one example per line).
        accumulation: cumulative end index for each chunk; the LAST entry is
            always replaced by the total line count. Defaults to [0, -1]
            (an empty first file, everything in the second).

    Returns:
        List of the chunk file paths, written next to fname as <stem>_<i>.<ext>.
    """
    # FIX: the original used a mutable default ([0, -1]) and wrote into it
    # (accumulation[-1] = ...), also mutating any caller-supplied list.
    # Default via None and work on a copy instead.
    accumulation = [0, -1] if accumulation is None else list(accumulation)
    with open(fname, 'r') as fr:
        lines = fr.readlines()
    # Sampling len(lines) items without replacement is a shuffle that leaves
    # the source list untouched.
    lines = random.sample(lines, len(lines))

    accumulation[-1] = len(lines)
    stem, ext = fname.rsplit(".", 1)
    start = 0
    out_file_list = []
    for i, end in enumerate(accumulation):
        sub_file = f"{stem}_{i}.{ext}"
        with open(sub_file, 'w') as fw:
            fw.write("".join(lines[start:end]))
        out_file_list.append(sub_file)
        start = end
    return out_file_list

def reconfig_args(args):
    """Force the full-BERT configuration onto args.

    Sets full_bert and points bertPath at the local English BERT checkpoint.

    Args:
        args: mutable namespace (e.g. argparse.Namespace).

    Returns:
        The same (mutated) args object, for chaining.
    """
    # FIX: the original assigned args.full_bert = True twice; once is enough.
    args.full_bert = True
    args.bertPath = "../../../bert_en/"
    print("====>", args.full_bert)  # debug trace kept from the original
    return args
