"""
读取2014_corpus.txt数据集信息
"""
import os
import numpy as np
from config.sys_config import CORPUS_2014_DATASET_PATH
from typing import List, Union, Tuple


class Corpus2014Dataset:
    """Loader for the 2014 People's Daily segmentation corpus.

    Reads the corpus file, optionally strips POS tags, splits the sentences
    into a train/test partition, and can expose the data in NLTK-style
    ``(char, BMES-label)`` form for sequence-labeling models.
    """

    def __init__(self, data_path: str = CORPUS_2014_DATASET_PATH,
                 split: float = 0.8,
                 keep_postag: bool = True,
                 nltk_style: bool = False):
        """
        Load the 2014 People's Daily corpus dataset.

        :param data_path: path to the corpus file
        :param split: train/test split ratio; by default 80% of the
            sentences go to the training set
        :param keep_postag: whether to keep the ``word/POS`` tag attached to
            each token (default ``True``)
        :param nltk_style: whether ``train_data``/``test_data`` return
            sentences as NLTK-style ``(char, label)`` pairs (default ``False``)
        """
        self._data_path = data_path
        self._split = split
        self._keep_postag = keep_postag
        self._nltk_style = nltk_style

        self.__transform()

    def __transform(self):
        """Read the corpus file and build the train/test sentence lists."""
        # `raise` instead of `assert`: asserts are stripped under `python -O`.
        if not os.path.exists(self._data_path):
            raise FileNotFoundError('数据集不存在')

        with open(self._data_path, 'r', encoding='utf-8') as r:
            # BUG FIX: strip the trailing newline, which previously stayed
            # attached to the last token of every sentence; skip blank lines.
            lines = [line.strip() for line in r]
        lines = [line for line in lines if line]

        if self._keep_postag:
            self._sentences = [line.split(" ") for line in lines]
        else:
            self._sentences = [[word.split('/')[0] for word in line.split(" ")]
                               for line in lines]
        self._sentences_len = len(self._sentences)

        ids = np.arange(0, self._sentences_len)
        # BUG FIX: sample WITHOUT replacement; the original default
        # (replace=True) duplicated sentences in the training set.
        train_idx = np.random.choice(ids,
                                     int(self._sentences_len * self._split),
                                     replace=False)
        # BUG FIX: take the set complement of the train indices. The original
        # `ids[~train_idx]` applied bitwise NOT to the index values
        # (~3 == -4), producing an arbitrary overlap instead of the test set.
        test_idx = np.setdiff1d(ids, train_idx)

        self._train_ds = [self._sentences[i] for i in train_idx.tolist()]
        self._test_ds = [self._sentences[i] for i in test_idx.tolist()]

    @property
    def train_data(self) -> Union[List[List[str]], List[List[Tuple[str, str]]]]:
        """Training sentences, NLTK-style if `nltk_style` was requested."""
        return self.nltk_style_transform(True) if self._nltk_style else self._train_ds

    @property
    def test_data(self) -> Union[List[List[str]], List[List[Tuple[str, str]]]]:
        """Test sentences, NLTK-style if `nltk_style` was requested."""
        return self.nltk_style_transform(False) if self._nltk_style else self._test_ds

    def nltk_style_transform(self, train: bool = True) -> List[List[Tuple[str, str]]]:
        """
        Convert the train or test sentences to NLTK style: each sentence
        becomes a list of ``(character, BMES-label)`` pairs.

        :param train: convert the training data if ``True`` (default),
            otherwise the test data
        :return: the converted sentences
        """
        dataset = self._train_ds if train else self._test_ds
        result = []
        for sentence in dataset:
            sentence_list = []
            for word in sentence:
                # If the raw data kept POS tags, drop the tag here.
                if self._keep_postag:
                    word = word.split('/')[0]
                word_list = list(word)
                for w, l in zip(word_list, self.__get_label(len(word_list))):
                    sentence_list.append((w, l))
            result.append(sentence_list)
        return result

    @classmethod
    def __get_label(cls, word_len: int) -> List[str]:
        """Return the BMES label sequence for a word of `word_len` chars."""
        if word_len == 1:
            return ['S']
        # BUG FIX: a two-character word is labeled B,E in the BMES scheme;
        # the original returned ['B', 'S'] for word_len == 2. The general
        # formula below already yields ['B', 'E'] for that case.
        return ['B'] + ['M'] * (word_len - 2) + ['E']

    @classmethod
    def __feature_and_label(cls,
                            sentence: List[Tuple[str, str]]
                            ) -> Tuple[List[str], List[str]]:
        """Split one NLTK-style sentence into its chars and labels.

        Returns plain lists (matching the annotation); the original returned
        numpy arrays and crashed on an empty sentence.
        """
        feature = [w for w, _ in sentence]
        label = [t for _, t in sentence]
        return feature, label

    @classmethod
    def features_and_labels(cls,
                            sentences: List[List[Tuple[str, str]]]
                            ) -> Tuple[List[List[str]], List[List[str]]]:
        """
        Separate sentence features (chars) from labels.

        :param sentences: list of NLTK-style sentences
        :return: a ``(features, labels)`` pair of parallel lists
        """
        result_features = []
        result_labels = []
        for sentence in sentences:
            # Delegate to the single-sentence helper instead of duplicating it.
            feature, label = cls.__feature_and_label(sentence)
            result_features.append(feature)
            result_labels.append(label)
        return result_features, result_labels


if __name__ == '__main__':
    # Smoke test: raw sentences with POS tags attached.
    dataset = Corpus2014Dataset(keep_postag=True)
    print('keep_postag=True')
    for sentence in dataset.train_data[:10]:
        print('\t', sentence)

    # Smoke test: NLTK-style (char, label) pairs.
    print('nltk_style=True')
    dataset = Corpus2014Dataset(nltk_style=True)
    for sentence in dataset.train_data[:10]:
        print('\t', sentence)

    # Smoke test: feature/label separation keeps the lists aligned.
    print('test features_and_labels')
    features, labels = Corpus2014Dataset.features_and_labels(dataset.train_data[:10])
    for feature, label in zip(features, labels):
        print('feature: ', feature)
        print('label: ', label)
        assert len(feature) == len(label), 'feature and label is not equal.'
