from abc import abstractmethod
from typing import List, Dict, Tuple, Optional

import torch
from ginnm import iter_parallel_map
from ginnm.jsonl import JsonLReader
from torchtext.data.field import Field

from mec.utils.data import BaseTxtDataset
from mec.utils.logger import logger


class Seq2seqDataset(BaseTxtDataset):
    """Dataset of several parallel text streams keyed by name.

    Each item is a tuple of aligned texts, one per entry in ``text_names``.
    The ``target`` stream is the only one given a non-zero sort weight, so
    the base class buckets/sorts batches by target length.
    """

    def __init__(self,
                 text_names: List[str],
                 target: str,
                 texts_dict: Dict[str, List[str]],
                 field_dict: Dict[str, Field]
                 ):
        """
        :param text_names: ordered names of the parallel text streams.
        :param target: name of the target stream; must appear in text_names.
        :param texts_dict: stream name -> list of raw texts (all same length
            across streams — TODO confirm, not checked here).
        :param field_dict: stream name -> torchtext Field used to numericalize.
        """
        # NOTE: asserts are stripped under `python -O`; they guard config errors.
        assert target in text_names
        assert set(texts_dict.keys()) == set(text_names)
        assert set(field_dict.keys()) == set(text_names)

        # Order the streams to match text_names; only the target stream
        # contributes to the sort key.
        texts = [texts_dict[text_name] for text_name in text_names]
        sort_weights = [1 if k == target else 0 for k in text_names]

        super().__init__(texts, sort_weights)
        self.field_dict = field_dict
        # Fix: the attribute was misspelled `texts_dic`; keep the old name as
        # an alias for backward compatibility with any external readers.
        self.texts_dict = self.texts_dic = texts_dict
        self.text_names = text_names

    def collate_fn(self, batch):
        """Collate a list of item tuples into a dict keyed by stream name.

        Returns, per stream: {"input": Field-processed tensor(s),
        "length": LongTensor of raw lengths, "raw": the raw texts}.
        """
        batch_res = {}
        for idx, name in enumerate(self.text_names):
            raw_txt = [each[idx] for each in batch]
            tensor_txt = self.field_dict[name].process(raw_txt)
            # Lengths are computed on the raw (pre-padding) texts.
            len_txt = torch.tensor([len(each) for each in raw_txt], dtype=torch.long)
            batch_res[name] = {"input": tensor_txt, "length": len_txt, "raw": raw_txt}
        return batch_res


class MulEncDataset:
    """Loads train/valid/test JSONL splits and converts them to Seq2seqDatasets.

    Subclasses must implement :meth:`build_field`, :meth:`build_texts` and
    :meth:`transform`.

    NOTE(review): methods are marked @abstractmethod but the class does not
    inherit from abc.ABC, so Python does not enforce them at instantiation;
    a missing override only surfaces as NotImplementedError when setup() runs.
    Left as-is because adding the ABC base would change when instantiation
    fails for existing subclasses.
    """

    def __init__(self,
                 train_path: str,
                 valid_path: str,
                 test_path: str,
                 keys: List[str],
                 target_key: str,
                 ):
        """
        :param train_path: path to the training JSONL file.
        :param valid_path: path to the validation JSONL file.
        :param test_path: path to the test JSONL file.
        :param keys: names of the text streams to build.
        :param target_key: which stream is the decoding target; must be in keys.
        """
        assert target_key in keys
        self.train: List[Dict] = JsonLReader(train_path).read()
        self.valid: List[Dict] = JsonLReader(valid_path).read()
        self.test: List[Dict] = JsonLReader(test_path).read()
        self.keys = keys
        self.target_key = target_key

        # Populated by setup() below.
        self.train_text: Optional[Dict[str, List[str]]] = None
        self.valid_text: Optional[Dict[str, List[str]]] = None
        self.test_text: Optional[Dict[str, List[str]]] = None
        self.field_dict: Optional[Dict[str, Field]] = None
        self.setup()

    @abstractmethod
    def build_field(self) -> Dict[str, Field]:
        """Build one torchtext Field per key. Implemented by subclasses."""
        raise NotImplementedError

    @abstractmethod
    def build_texts(self, data: List[Dict]) -> Dict[str, List[str]]:
        """Turn transformed items into a key -> list-of-texts mapping.

        Implemented by subclasses. (Was `pass`, which silently returned
        None when not overridden; now raises like its siblings.)
        """
        raise NotImplementedError

    @abstractmethod
    def transform(self, item: Dict) -> Optional[Dict]:
        """Transform one raw JSONL item; return None to drop it."""
        raise NotImplementedError

    def _build_split_texts(self, data: List[Dict], split_name: str) -> Dict[str, List[str]]:
        """Transform a raw split, drop None results, and build its text dict.

        :param data: raw items of one split.
        :param split_name: "train"/"valid"/"test", used only for logging.
        """
        n_before = len(data)
        # NOTE: could be parallelized with ginnm.iter_parallel_map(self.transform,
        # data, workers=4) if transform becomes a bottleneck.
        transformed = [self.transform(each) for each in data]
        kept = [t for t in transformed if t is not None]
        # Bug fix: the dropped count was computed as after - before, which
        # always logged zero or a negative number.
        logger.info(f"{split_name} set dropped {n_before - len(kept)} items")
        return self.build_texts(kept)

    def build_train_texts(self) -> Dict[str, List[str]]:
        return self._build_split_texts(self.train, "train")

    def build_valid_texts(self) -> Dict[str, List[str]]:
        return self._build_split_texts(self.valid, "valid")

    def build_test_texts(self) -> Dict[str, List[str]]:
        return self._build_split_texts(self.test, "test")

    def setup(self):
        """Build all three text splits, then the shared field dict."""
        logger.info("Building training data")
        self.train_text = self.build_train_texts()
        logger.info("Building valid data")
        self.valid_text = self.build_valid_texts()
        logger.info("Building test data")
        self.test_text = self.build_test_texts()
        logger.info("Building field")
        self.field_dict = self.build_field()

    def to_datasets(self) -> Tuple[Seq2seqDataset, Seq2seqDataset, Seq2seqDataset]:
        """Wrap the prepared splits as (train, valid, test) Seq2seqDatasets.

        All three share the same field_dict built by setup().
        """
        text_names = self.keys
        target = self.target_key

        train_set = Seq2seqDataset(text_names=text_names,
                                   target=target,
                                   texts_dict=self.train_text,
                                   field_dict=self.field_dict
                                   )

        valid_set = Seq2seqDataset(text_names=text_names,
                                   target=target,
                                   texts_dict=self.valid_text,
                                   field_dict=self.field_dict
                                   )

        test_set = Seq2seqDataset(text_names=text_names,
                                  target=target,
                                  texts_dict=self.test_text,
                                  field_dict=self.field_dict
                                  )

        return train_set, valid_set, test_set
