from typing import AnyStr, List, Tuple
from transformers import PreTrainedTokenizer
from transformers import DistilBertConfig
from transformers import DistilBertTokenizer
from transformers import BertConfig
from sentiDataReader import SentiDatasetV2, Senti_domain_map, text_to_batch_transformer
from sentiDataReader import collate_Senti_batch_with_device
from model import VanillaBert, BertForSequenceClassification, DistilBertForSequenceClassification
from transformers import BertTokenizer
import torch, random, numpy as np

class CRST_Dataset(SentiDatasetV2):
    """Sentiment dataset variant for CRST self-training.

    Extends ``SentiDatasetV2`` with per-example confidence/entropy buffers
    and a one-hot label matrix, filled in by :meth:`load_data`.
    """

    def __init__(self,
            dataset_path: AnyStr = None,
            domains: List = None,
            tokenizer: PreTrainedTokenizer = None,
            domain_ids: List = None,
            max_data_size: int = -1,
            load_data = False
        ):
        # Always pass load_data=False to the parent so that OUR load_data
        # (which builds the CRST-specific buffers) is the one that runs.
        super(CRST_Dataset, self).__init__(dataset_path, domains, tokenizer, domain_ids, max_data_size, False)
        if load_data:
            self.load_data()

    def load_data(self):
        """Read the raw dataset and populate sentences, one-hot labels,
        confidence/entropy buffers, domain array, and the collate function."""
        dataset = self.obtainDataset(self.dataset_path, data_size=self.max_data_size)
        if self.domain_ids is not None and self.domains is not None:
            for i in range(len(self.domain_ids)):
                # BUG FIX: the original chained assignment
                #   dataset[mask][2] = value
                # wrote into a temporary copy and was silently discarded
                # (pandas SettingWithCopy). Use .loc so the write sticks.
                # NOTE(review): the column target `2` looks suspicious —
                # 'domain' may have been intended; confirm against the
                # schema produced by obtainDataset.
                mask = dataset['domain'] == Senti_domain_map[self.domains[i]]
                dataset.loc[mask, 2] = self.domain_ids[i]
        self.sents = dataset['sent'].values
        label = torch.tensor(dataset['label'].values, dtype=torch.int32)
        max_label_type = label.max().item()
        if max_label_type > 0:
            # One-hot encode: column y is 1.0 where label == y.
            one_hot = [(label == y).float() for y in range(max_label_type + 1)]
            self._label = torch.stack(one_hot).T.cpu().numpy()
        else:
            # Every label is 0 (or the split is effectively unlabeled):
            # emit an all-zero two-column matrix so downstream code still
            # sees the binary-classification label shape.
            self._label = np.zeros([len(label), 2], dtype=np.float32)
        # CRST buffers: start fully confident with zero entropy per example.
        self._confidence = torch.ones(len(self._label))
        self._entrophy = torch.zeros(len(self._label))
        self.domain = dataset['domain'].values
        self.read_indexs = np.arange(len(self.domain))
        device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
        self.collate_raw_batch = collate_Senti_batch_with_device(device)

def obtain_model(args, model_device):
    """Build the sentiment classifier (full BERT or DistilBERT) plus its tokenizer.

    The checkpoint source is either a local path from ``args`` (when set)
    or the matching HuggingFace hub name. Returns ``(model, tokenizer)``
    with the model already moved to ``model_device``.
    """
    if args.full_bert:
        source = args.bertPath if args.bertPath is not None else 'bert-base-uncased'
        bert_config = BertConfig.from_pretrained(source, num_labels=2)
        tokenizer_M = BertTokenizer.from_pretrained(source)
    else:
        source = args.distillBertPath if args.distillBertPath is not None else 'distilbert-base-uncased'
        bert_config = DistilBertConfig.from_pretrained(source, num_labels=2)
        tokenizer_M = DistilBertTokenizer.from_pretrained(source)
    # Binary sentiment head with ReLU activation; cap inputs at 256 tokens.
    bert_config.num_labels = 2
    bert_config.hidden_act = "relu"
    tokenizer_M.model_max_length = 256
    # Instantiate the encoder from the same source as its config/tokenizer.
    if args.full_bert:
        bert = BertForSequenceClassification.from_pretrained(
            source, config=bert_config).to(model_device)
    else:
        bert = DistilBertForSequenceClassification.from_pretrained(
            source, config=bert_config).to(model_device)
    model = VanillaBert(bert).to(model_device)
    return model, tokenizer_M

def obtain_domain_set(new_domain_name, tokenizer_M, few_shot_cnt=100, lt_count=0):
    """Assemble the source/val/test/(labeled)/unlabeled splits for one target domain.

    Returns ``(source_domain, val_set, test_target, labeled_target,
    unlabeled_target)``; ``labeled_target`` is None when ``lt_count <= 0``.
    Raises KeyError if ``new_domain_name`` is not a known domain.
    """
    remaining = {'books', 'dvd', 'kitchen', 'electronics'}
    remaining.remove(new_domain_name)
    src_files = [f"../../sentiment_data/{name}/all.txt" for name in remaining]
    source_domain = SentiDatasetV2(src_files, tokenizer=tokenizer_M, load_data=True)
    test_target = SentiDatasetV2(
        [f"../../sentiment_data/{new_domain_name}/all.txt"],
        tokenizer=tokenizer_M,
        load_data=True
    )
    unlabeled_target = SentiDatasetV2(
        [f"../../sentiment_data/{new_domain_name}/unl.txt"],
        tokenizer=tokenizer_M,
        load_data=True
    )
    # Few-shot validation set sampled (without replacement) from the target.
    val_set = test_target.Derive(
        random.sample(range(len(test_target)), few_shot_cnt)
    )
    labeled_target = None
    if lt_count > 0:
        # Optional small labeled-target pool; may overlap with val_set since
        # both are drawn independently from test_target (original behavior).
        labeled_target = test_target.Derive(
            random.sample(range(len(test_target)), lt_count)
        )
    return source_domain, val_set, test_target, labeled_target, unlabeled_target


def obtain_CRST_set(new_domain_name, tokenizer_M, few_shot_cnt=100, lt_count=0):
    """CRST counterpart of ``obtain_domain_set``: identical split logic but
    every split is a ``CRST_Dataset`` (with confidence/entropy buffers).

    Returns ``(source_domain, val_set, test_target, labeled_target,
    unlabeled_target)``; ``labeled_target`` is None when ``lt_count <= 0``.
    """
    remaining = {'books', 'dvd', 'kitchen', 'electronics'}
    remaining.remove(new_domain_name)
    src_files = [f"../../sentiment_data/{name}/all.txt" for name in remaining]
    source_domain = CRST_Dataset(src_files, tokenizer=tokenizer_M, load_data=True)
    test_target = CRST_Dataset(
        [f"../../sentiment_data/{new_domain_name}/all.txt"],
        tokenizer=tokenizer_M,
        load_data=True
    )
    unlabeled_target = CRST_Dataset(
        [f"../../sentiment_data/{new_domain_name}/unl.txt"],
        tokenizer=tokenizer_M,
        load_data=True
    )
    # Few-shot validation set sampled (without replacement) from the target.
    val_set = test_target.Derive(
        random.sample(range(len(test_target)), few_shot_cnt)
    )
    labeled_target = None
    if lt_count > 0:
        # Optional small labeled-target pool; may overlap with val_set since
        # both are drawn independently from test_target (original behavior).
        labeled_target = test_target.Derive(
            random.sample(range(len(test_target)), lt_count)
        )
    return source_domain, val_set, test_target, labeled_target, unlabeled_target

def SplitDataFile(fname, accumulation=None):
    """Shuffle the lines of *fname* and split them into consecutive sub-files.

    Parameters:
        fname: path to a text file whose name contains an extension
            (split on the last "."); each sub-file is "<stem>_<i>.<ext>".
        accumulation: list of cumulative end offsets; the LAST entry is
            always replaced by the total line count. The default
            ``[0, -1]`` produces an empty "_0" file and a "_1" file
            holding every (shuffled) line.

    Returns:
        List of the sub-file paths, in write order.
    """
    # BUG FIX: the original signature used a mutable default ([0, -1])
    # that the function then mutated — it also clobbered any list a
    # caller passed in. Copy locally instead.
    ends = [0, -1] if accumulation is None else list(accumulation)
    with open(fname, 'r') as fr:
        lines = fr.readlines()
    # random.sample(x, len(x)) is a shuffle without replacement.
    lines = random.sample(lines, len(lines))
    ends[-1] = len(lines)
    stem, ext = fname.rsplit(".", 1)
    start = 0
    out_file_list = []
    for i, end in enumerate(ends):
        sub_file = f"{stem}_{i}.{ext}"
        with open(sub_file, 'w') as fw:
            fw.write("".join(lines[start:end]))
        out_file_list.append(sub_file)
        start = end
    return out_file_list

def reconfig_args(args):
    """Force full-BERT mode with the local English BERT checkpoint.

    Mutates *args* in place (sets ``full_bert`` and ``bertPath``) and
    returns the same object for call-chaining convenience.
    """
    # Originally full_bert was assigned True twice; once is enough.
    args.full_bert = True
    args.bertPath = "../../../bert_en/"
    print("====>", args.full_bert)
    return args
