import jieba
import random
import collections

import torch
from torch.utils import data


def read_data(file_path):
    """Load the entire dataset file as a single UTF-8 string."""
    with open(file_path, mode='r', encoding='utf-8') as handle:
        content = handle.read()
    return content


def preprocess_data(text, need_punct=False):
    """Preprocess raw text: normalize spaces, lowercase, handle punctuation.

    Args:
        text: raw dataset string.
        need_punct: if True, keep punctuation but ensure a single space
            precedes each punctuation mark (so it splits into its own token);
            if False, strip punctuation entirely.

    Returns:
        The normalized text string.
    """
    punct = set(',.!?，。！？')
    # Replace narrow/non-breaking spaces with regular spaces and lowercase.
    text = text.replace('\u202f', ' ').replace('\xa0', ' ').lower()
    if need_punct:
        # Insert a space before punctuation not already preceded by one.
        def no_space(char, prev_char):
            return char in punct and prev_char != ' '

        out = [' ' + char if i > 0 and no_space(char, text[i - 1]) else char
               for i, char in enumerate(text)]
        text = ''.join(out)
    else:
        # Drop punctuation characters. BUG FIX: the original guard
        # `char if i > 0 and not is_punct(char) else ''` unconditionally
        # dropped the first character of the text (i == 0 always mapped to '').
        text = ''.join(char for char in text if char not in punct)
    return text


def split_text(text, method='word', language='eg'):
    """Tokenize text by language: 'eg' splits on spaces, 'ch' uses split_ch.

    Raises:
        ValueError: for any unsupported language code.
    """
    if language == 'ch':
        return split_ch(text, method)
    if language == 'eg':
        return text.split(' ')
    raise ValueError("不支持该语言的分词！")


def split_ch(text, method='word'):
    """Tokenize Chinese text per character or via jieba word segmentation.

    Args:
        text: Chinese string; spaces are removed before tokenizing.
        method: 'char' for character-level tokens, 'word' for jieba words.

    Returns:
        A list of token strings.

    Raises:
        ValueError: if `method` is neither 'char' nor 'word'.
    """
    # Remove spaces first so they never become tokens.
    text = text.replace(' ', '')
    if method == 'char':
        out = list(text)
    elif method == 'word':
        # jieba.cut yields segmented words; spaces were already removed,
        # so joining/splitting on spaces (as the original did) is unneeded.
        out = list(jieba.cut(text))
    else:
        # BUG FIX: the original did `print(...)` then a bare `raise` with no
        # active exception, which raised an unrelated RuntimeError.
        raise ValueError("没有这种分词方法！")
    return out


def tokenize_data(text, num_examples=None, method='word'):
    """Split the dataset into parallel (source, target) token lists.

    Each line is expected to be "english<TAB>chinese"; lines without exactly
    one tab separator are skipped.

    Args:
        text: preprocessed dataset string, one sentence pair per line.
        num_examples: optional cap on how many lines are consumed.
        method: tokenization method forwarded to split_text.

    Returns:
        (source, target): lists of token lists for each language.
    """
    source, target = [], []
    for i, line in enumerate(text.split('\n')):
        # BUG FIX: was `i > num_examples`, an off-by-one that consumed
        # num_examples + 1 lines.
        if num_examples and i >= num_examples:
            break
        parts = line.split('\t')
        if len(parts) == 2:
            source.append(split_text(parts[0], method=method, language='eg'))
            target.append(split_text(parts[1], method=method, language='ch'))
    return source, target


class Vocab:
    """Vocabulary mapping between tokens and integer indices.

    Index 0 is always the unknown token '<unk>'; any reserved tokens come
    next, followed by corpus tokens in order of decreasing frequency.
    Tokens whose frequency is below `min_freq` are discarded.
    """

    def __init__(self, tokens=None, min_freq=0, reserved_tokens=None):
        if tokens is None:
            tokens = []
        if reserved_tokens is None:
            reserved_tokens = []
        # Frequency table, most common first.
        counter = count_corpus(tokens)
        self._token_freqs = sorted(counter.items(),
                                   key=lambda item: item[1], reverse=True)
        # '<unk>' always occupies index 0, reserved tokens follow.
        self.idx_to_token = ['<unk>'] + reserved_tokens
        self.token_to_idx = {tok: i for i, tok in enumerate(self.idx_to_token)}
        for tok, freq in self._token_freqs:
            if freq < min_freq:
                # Frequencies are sorted descending; everything after this
                # point is too rare as well.
                break
            if tok not in self.token_to_idx:
                self.token_to_idx[tok] = len(self.idx_to_token)
                self.idx_to_token.append(tok)

    def __len__(self):
        """Number of tokens in the vocabulary (including '<unk>' and reserved)."""
        return len(self.idx_to_token)

    def __getitem__(self, tokens):
        """Map a token (or nested list/tuple of tokens) to index(es); unknown -> unk."""
        if isinstance(tokens, (list, tuple)):
            return [self.__getitem__(tok) for tok in tokens]
        return self.token_to_idx.get(tokens, self.unk)

    def to_tokens(self, indices):
        """Inverse of __getitem__: map an index (or list/tuple) back to token(s)."""
        if isinstance(indices, (list, tuple)):
            return [self.idx_to_token[i] for i in indices]
        return self.idx_to_token[indices]

    @property
    def unk(self):
        """Index of the unknown token — always 0."""
        return 0

    @property
    def token_freqs(self):
        """(token, frequency) pairs sorted by decreasing frequency."""
        return self._token_freqs


def count_corpus(tokens):
    """Count token frequencies; accepts a 1-D token list or a 2-D list of lines."""
    # A 2-D list (or an empty list) is flattened into a single token stream.
    if not tokens or isinstance(tokens[0], list):
        flat = (token for line in tokens for token in line)
    else:
        flat = tokens
    return collections.Counter(flat)


def load_array(data_arrays, batch_size, is_train=True):
    """Wrap tensors in a PyTorch DataLoader.

    Args:
        data_arrays: tensors sharing the same first (sample) dimension.
        batch_size: number of samples per mini-batch.
        is_train: when True, reshuffle the samples every epoch.
    """
    return data.DataLoader(data.TensorDataset(*data_arrays),
                           batch_size, shuffle=is_train)


def truncate_pad(line, num_steps, padding_token):
    """Force `line` to exactly `num_steps` tokens by padding or truncating.

    Pads short sequences with `padding_token`; the trailing slice truncates
    long ones (the padding term is empty in that case, since the repeat
    count is negative).
    """
    padded = line + [padding_token] * (num_steps - len(line))
    return padded[:num_steps]


def build_array_data(lines, vocab, num_steps):
    """Convert token sequences into a fixed-width index tensor plus lengths.

    Args:
        lines: list of token lists (one per sentence).
        vocab: vocabulary mapping tokens — and lists of tokens — to indices.
        num_steps: fixed sequence length after truncation/padding.

    Returns:
        (array, valid_len): an int tensor of shape (len(lines), num_steps)
        and an int32 tensor holding each row's length excluding padding.
    """
    pad_idx, eos_idx = vocab['<pad>'], vocab['<eos>']
    # Index every sentence and terminate it with <eos>.
    indexed = [vocab[line] + [eos_idx] for line in lines]
    # Pad/truncate so every row has exactly num_steps positions.
    array = torch.tensor(
        [truncate_pad(seq, num_steps, pad_idx) for seq in indexed])
    # Effective length = count of non-<pad> positions per row.
    valid_len = (array != pad_idx).type(torch.int32).sum(1)
    return array, valid_len


def load_data_translate(file_path, batch_size, num_steps=10, num_examples=1000, method='word'):
    """Build the translation DataLoader and the source/target vocabularies.

    Args:
        file_path: path to a tab-separated "english<TAB>chinese" corpus file.
        batch_size: mini-batch size for the returned iterator.
        num_steps: fixed sequence length after truncation/padding.
        num_examples: maximum number of sentence pairs to load.
        method: tokenization method ('word' or 'char').

    Returns:
        (data_iter, src_vocab, tgt_vocab)
    """
    raw_text = preprocess_data(read_data(file_path), need_punct=False)
    source, target = tokenize_data(raw_text, num_examples, method)
    src_vocab = Vocab(source, min_freq=2,
                      reserved_tokens=['<pad>', '<bos>', '<eos>'])
    tgt_vocab = Vocab(target, min_freq=2,
                      reserved_tokens=['<pad>', '<bos>', '<eos>'])
    src_array, src_valid_len = build_array_data(source, src_vocab, num_steps)
    tgt_array, tgt_valid_len = build_array_data(target, tgt_vocab, num_steps)
    data_iter = load_array(
        (src_array, src_valid_len, tgt_array, tgt_valid_len), batch_size)
    return data_iter, src_vocab, tgt_vocab


if __name__ == '__main__':
    # Smoke test: load the English-Chinese corpus and print one mini-batch.
    corpus_path = "../DataSet/en-zh.txt"
    train_iter, src_vocab, tgt_vocab = load_data_translate(
        corpus_path, batch_size=2, num_steps=8)
    for X, X_valid_len, Y, Y_valid_len in train_iter:
        print('X:', X.type(torch.int32))
        print('X的有效长度:', X_valid_len)
        print('Y:', Y.type(torch.int32))
        print('Y的有效长度:', Y_valid_len)
        break  # only the first batch is inspected
