"""  
 数据加载器
"""
#-*- coding : utf-8 -*-
# coding: utf-8
import pandas as pd
import numpy as np
import tensorflow as tf
from sklearn.model_selection import train_test_split
import config
import tensorflow_datasets as tfds
from tqdm import tqdm


class DataLoader:
    """Load a translation corpus, build/load subword tokenizers and turn the
    data into batched ``tf.data.Dataset`` pipelines.
    """

    def __init__(self, path=None, num_examples=None, reverse=False):
        """
        :param path: dataset file path; defaults to ``config.data_path``
        :param num_examples: maximum number of rows/lines to read (None = all)
        :param reverse: swap translation direction (use strCH as input,
            strEN as target)
        """
        # Dataset file path.
        self.path = path or config.data_path
        # Shuffle buffer size for tf.data.
        self.buffer_size = config.BUFFER_SIZE
        # Batch size.
        self.batch_size = config.BATCH_SIZE
        # Input-language tokenizer (populated by load_tokenizer).
        self.input_tokenizer = None
        # Target-language tokenizer (populated by load_tokenizer).
        self.target_tokenizer = None
        # Number of examples to read.
        self.num_examples = num_examples
        # Whether to swap input/target direction.
        self.reverse = reverse

    def load_datasets(self, return_dataset=False):
        """Read the dataset file (.csv or .txt supported).

        :param return_dataset: also return a ``tf.data.Dataset`` view
        :return: DataFrame, or ``(DataFrame, Dataset)`` when
            ``return_dataset`` is True. Unsupported file types yield
            ``None`` (or ``(None, None)``).
        """
        # Dispatch on the file extension.
        if self.path.endswith(".csv"):
            data_frame = self.load_csv_data()
        elif self.path.endswith(".txt"):
            data_frame = self.load_txt_data()
        else:
            data_frame = None
            print("不支持该类型文件的读取\n目前只支持txt,csv文件")
        if return_dataset:
            # Bug fix: the original called .values on None for unsupported
            # file types and crashed with AttributeError.
            if data_frame is None:
                return None, None
            # Convert the DataFrame into a Dataset.
            dataset = tf.data.Dataset.from_tensor_slices(data_frame.values)
            return data_frame, dataset

        return data_frame

    def load_csv_data(self):
        """Read a csv file into a DataFrame (at most ``num_examples`` rows)."""
        data_frame = pd.read_csv(self.path, nrows=self.num_examples)

        print("数据集数量:", len(data_frame.values))

        return data_frame

    def load_txt_data(self):
        """Read a tab-separated txt file into a two-column DataFrame.

        Each line is expected to be ``<english>\\t<chinese>``.
        """
        # Use a context manager so the file handle is always closed
        # (the original left it open).
        with open(self.path, encoding='UTF-8') as f:
            lines = f.read().strip().split('\n')
        # Split each line into the (input, target) pair.
        word_pairs = [l.split('\t') for l in lines[:self.num_examples]]
        # Wrap the pairs in a DataFrame.
        data_frame = pd.DataFrame(word_pairs, columns=['strEN', "strCH"])
        print("数据集数量:", len(data_frame.values))
        return data_frame

    def generate_tokenizer(self, dataset, input_tokenizer_save_path=None, target_tokenizer_save_path=None):
        """Build subword vocabularies from the corpus and save them to disk.

        :param dataset: tf.data.Dataset yielding (input, target) pairs
        :param input_tokenizer_save_path: file prefix for the input vocabulary
        :param target_tokenizer_save_path: file prefix for the target vocabulary
        :return: (input_tokenizer, target_tokenizer)
        """
        input_tokenizer = tfds.deprecated.text.SubwordTextEncoder.build_from_corpus(
            (inp.numpy() for inp, targ in tqdm(dataset, desc="input_tokenizer")), target_vocab_size=2 ** 13)

        target_tokenizer = tfds.deprecated.text.SubwordTextEncoder.build_from_corpus(
            (targ.numpy() for inp, targ in tqdm(dataset, desc="target_tokenizer")), target_vocab_size=2 ** 13)
        # Persist both vocabularies.
        input_tokenizer.save_to_file(input_tokenizer_save_path or './input_tokenizer')
        target_tokenizer.save_to_file(target_tokenizer_save_path or './target_tokenizer')
        # Return them so callers don't have to reload from disk.
        return input_tokenizer, target_tokenizer

    def load_tokenizer(self, inp_tokenizer_path, targ_tokenizer_path):
        """Load the subword tokenizers from disk.

        When ``self.reverse`` is True the two files are swapped: the file at
        ``inp_tokenizer_path`` becomes the target tokenizer and vice versa.

        :param inp_tokenizer_path: input vocabulary file prefix
        :param targ_tokenizer_path: target vocabulary file prefix
        :return: (inp_tokenizer, targ_tokenizer)
        """
        load = tfds.deprecated.text.SubwordTextEncoder.load_from_file
        if self.reverse:
            inp_tokenizer = load(targ_tokenizer_path)
            targ_tokenizer = load(inp_tokenizer_path)
        else:
            inp_tokenizer = load(inp_tokenizer_path)
            targ_tokenizer = load(targ_tokenizer_path)
        # Cache on the instance for encode()/tf_encode().
        self.input_tokenizer = inp_tokenizer
        self.target_tokenizer = targ_tokenizer

        return inp_tokenizer, targ_tokenizer

    def split_train_test(self, data, test_size=0.2, random_state=None):
        """Split the data into train and test partitions.

        :param data: DataFrame to split
        :param test_size: fraction of the data used for the test set
        :param random_state: optional seed for a reproducible split
        :return: (train, test)
        """
        train, test = train_test_split(data, test_size=test_size, random_state=random_state)
        print("训练集数量:{} 测试集数量:{}".format(len(train), len(test)))
        return train, test

    def encode(self, lang1, lang2):
        """Encode a text pair into id vectors, wrapped in start/end tokens.

        The ids ``vocab_size`` and ``vocab_size + 1`` act as the
        start-of-sequence and end-of-sequence markers.

        :param lang1: input text tensor
        :param lang2: target text tensor
        :return: (input_ids, target_ids) as Python lists
        """
        lang1 = [self.input_tokenizer.vocab_size] + self.input_tokenizer.encode(
            lang1.numpy()) + [self.input_tokenizer.vocab_size + 1]

        lang2 = [self.target_tokenizer.vocab_size] + self.target_tokenizer.encode(
            lang2.numpy()) + [self.target_tokenizer.vocab_size + 1]

        return lang1, lang2

    def tf_encode(self, inp, targ):
        """Graph-compatible wrapper around :meth:`encode`.

        :param inp: input text tensor
        :param targ: target text tensor
        :return: (input_ids, target_ids) as int64 tensors
        """
        result_pt, result_en = tf.py_function(self.encode, [inp, targ], [tf.int64, tf.int64])
        # py_function loses shape info; restore the 1-D shape.
        result_pt.set_shape([None])
        result_en.set_shape([None])

        return result_pt, result_en

    def filter_max_length(self, x, y, max_length=config.MAX_LENGTH):
        """Keep only pairs where both sequences have at most ``max_length``
        tokens (i.e. drop examples longer than the limit).

        :param x: encoded input sequence
        :param y: encoded target sequence
        :param max_length: maximum allowed sequence length
        :return: boolean tensor suitable for ``Dataset.filter``
        """
        return tf.logical_and(tf.size(x) <= max_length,
                              tf.size(y) <= max_length)

    def data_to_dataset(self, data):
        """Convert a DataFrame into an encoded, filtered, batched Dataset."""
        # Pick the column order according to the translation direction.
        if self.reverse:
            dataset = tf.data.Dataset.from_tensor_slices((data['strCH'].values, data["strEN"].values))
        else:
            dataset = tf.data.Dataset.from_tensor_slices((data['strEN'].values, data["strCH"].values))
        dataset = dataset.map(self.tf_encode)
        dataset = dataset.filter(self.filter_max_length)
        # Cache in memory to speed up subsequent epochs.
        dataset = dataset.cache()
        dataset = dataset.shuffle(self.buffer_size).padded_batch(self.batch_size)
        dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)

        return dataset

if __name__ == "__main__":
    # 实例化一个数据加载器
    data_loader = DataLoader(path='../data/processed_en-ch.txt', reverse=False)
    # 获取数据DataFrame类型
    data_frame, dataset = data_loader.load_datasets(return_dataset=True)
    # 生成词典
    data_loader.generate_tokenizer(dataset, config.input_tokenizer_path, config.target_tokenizer_path)
    # 导入词典
    inp_tokenizer, targ_tokenizer = data_loader.load_tokenizer(config.input_tokenizer_path,
                               config.target_tokenizer_path)
    # 将数据划分成训练集和测试集
    train, test = data_loader.split_train_test(data_frame, test_size=0.2)
    #
    # # 将数据转换为dataset
    train_dataset = data_loader.data_to_dataset(train)
    test_dataset = data_loader.data_to_dataset(test)
    print(train["strCH"].values[0])
    for inp, tar in train_dataset:
         print(inp.numpy()[0])
         print(tar.numpy()[0])
         print(inp_tokenizer.decode([i for i in inp.numpy()[0] if i < inp_tokenizer.vocab_size]))
         print(targ_tokenizer.decode([i for i in tar.numpy()[0] if i < targ_tokenizer.vocab_size]))
         break

