
import os
import json
import pickle
from tqdm import tqdm
import logging

from torch.utils.data import Dataset, DataLoader

logger = logging.getLogger(__name__)


class FileOperation(object):
    """Static helpers for reading/writing text, JSON, and pickle files."""

    @classmethod
    def save_text_to_file(cls, texts, path, encoding='utf-8'):
        """Write the string ``texts`` to ``path``."""
        with open(path, 'w', encoding=encoding) as f:
            f.write(texts)

    @classmethod
    def save_json_to_file(cls, obj, path):
        """Serialize ``obj`` as JSON to ``path``.

        The encoding is pinned to utf-8 so the written file does not
        depend on the platform locale (the original used the default
        encoding, which breaks round-trips on e.g. cp936 systems).
        """
        with open(path, 'w', encoding='utf-8') as f:
            json.dump(obj, f)

    @classmethod
    def save_plk_to_file(cls, obj, path):
        """Pickle ``obj`` to ``path``."""
        with open(path, 'wb') as f:
            pickle.dump(obj, f)

    @classmethod
    def read_texts_file(cls, path, encoding='utf-8'):
        """Return the entire text content of ``path``."""
        with open(path, 'r', encoding=encoding) as f:
            texts = f.read()
        return texts

    @classmethod
    def read_json_file(cls, path):
        """Load and return the JSON document stored at ``path``.

        utf-8 is forced so files containing non-ASCII content parse
        correctly regardless of the platform's locale encoding.
        """
        with open(path, 'r', encoding='utf-8') as f:
            obj = json.load(f)
        return obj

    @classmethod
    def read_plk_file(cls, path):
        """Unpickle and return the object stored at ``path``.

        NOTE(review): pickle.load executes arbitrary code from the file;
        only use on cache files this project wrote itself.
        """
        with open(path, 'rb') as f:
            obj = pickle.load(f)
        return obj


class DataSetOperation(object):
    """Read/write access to a dataset directory and its cache.

    Expected directory layout::

        dataset name
        - train.json
        - val.json
        - test.json
        - buff
            - train_{flag}.plk
            - val_{flag}.plk
            - ...

    Args:
        dataset_dir: root directory of the dataset.
        flag: optional tag embedded in cache file names so multiple
            preprocessing variants can coexist side by side.

    ``mode`` in the methods below is 'train' / 'val' / 'test'.
    """
    def __init__(self, dataset_dir, flag=None):
        self.dataset_dir = dataset_dir
        self.flag = flag
        self.buff_dir_name = 'buff'
        self.data_extend = 'json'  # raw dataset files are always .json
        self.buff_data_extend = 'plk'  # pickled cache files

    def _get_path_(self, mode, buff=False, file_extend='json', check_create_dir=True):
        """Build the path of a raw or cached data file.

        When ``buff`` is true the path lives under the cache
        sub-directory and ``flag`` (if set) is embedded in the file
        name; the cache directory is created on demand while
        ``check_create_dir`` is true.
        """
        if buff:
            file_dir = os.path.join(self.dataset_dir, self.buff_dir_name)
        else:
            file_dir = self.dataset_dir

        if buff and (self.flag is not None):
            file_name = f"{mode}_{self.flag}.{file_extend}"
        else:
            file_name = f"{mode}.{file_extend}"

        if buff and check_create_dir:
            # exist_ok avoids the check-then-create race of the original
            # os.path.exists() + os.makedirs() pair.
            os.makedirs(file_dir, exist_ok=True)

        return os.path.join(file_dir, file_name)

    def data_exists(self, mode='train'):
        """True if the raw .json file for ``mode`` exists."""
        return os.path.exists(self._get_path_(mode=mode, buff=False, file_extend=self.data_extend))

    def data(self, mode='train'):
        """Load and return the raw .json dataset for ``mode``."""
        return FileOperation.read_json_file(self._get_path_(mode=mode, buff=False, file_extend=self.data_extend))

    def buff_data_exists(self, mode='train'):
        """True if a cache (.plk) file for ``mode`` exists."""
        return os.path.exists(self._get_path_(mode=mode, buff=True, file_extend=self.buff_data_extend))

    def buff_data(self, mode='train'):
        """Load and return the cached (pickled) data for ``mode``."""
        return FileOperation.read_plk_file(self._get_path_(mode=mode, buff=True, file_extend=self.buff_data_extend))

    def save_buff_data(self, obj, mode='train'):
        """Pickle ``obj`` into the cache file for ``mode``."""
        save_path = self._get_path_(mode=mode, buff=True, file_extend=self.buff_data_extend)
        FileOperation.save_plk_to_file(obj, path=save_path)


class TextDataSet(Dataset):
    """Thin ``Dataset`` wrapper that encodes each stored sample on access.

    Args:
        data: sequence of pre-converted sample ids.
        sample_ids_encode_func: callable applied to one sample's ids to
            produce the encoded item returned by ``__getitem__``.
    """

    def __init__(self, data, sample_ids_encode_func):
        self.data = data
        self.sample_ids_encode_func = sample_ids_encode_func

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        encode = self.sample_ids_encode_func
        return encode(self.data[index])


class TextProcess(object):
    """Base pipeline: raw dataset -> sample ids -> encoded batches.

    Subclasses must implement ``sample_to_ids``, ``encode_sample_ids``
    and ``batch_collate``; this base class handles caching and
    ``DataLoader`` construction.
    """

    def __init__(self, dataset_dir, flag=None):
        self.dataset_operation = DataSetOperation(dataset_dir=dataset_dir, flag=flag)

    def data(self, mode='train'):
        """Return the raw (.json) dataset for ``mode``."""
        return self.dataset_operation.data(mode=mode)

    def _data_ids_(self, mode='train', check_exists=True):
        """Convert the raw dataset to ids, using/refreshing the cache.

        When ``check_exists`` is true and a cache file exists, the
        cached ids are returned directly; otherwise the raw data is
        re-processed and the cache rewritten.
        """
        # Short-circuit on check_exists first so the filesystem probe is
        # skipped entirely when the caller forces a rebuild (the original
        # always hit the disk before testing the flag).
        if check_exists and self.dataset_operation.buff_data_exists(mode=mode):
            return self.dataset_operation.buff_data(mode=mode)
        data = self.data(mode=mode)
        data_ids_list = []
        for sample in tqdm(data, desc=f"{mode} data to ids"):
            sample_ids = self.sample_to_ids(sample)
            # Subclasses may return None from sample_to_ids to drop a sample.
            if sample_ids is None:
                continue
            data_ids_list.append(sample_ids)
        if len(data_ids_list) == 0:
            logger.warning(f"数据集 mode:{mode}，处理完成后数量为0 请注意")
        self.dataset_operation.save_buff_data(data_ids_list, mode=mode)
        return data_ids_list

    def dataloader(self, mode='train', batch_size=32, shuffle=True, check_exists=True):
        """Build a ``DataLoader`` over the (possibly cached) id data."""
        data = self._data_ids_(mode=mode, check_exists=check_exists)
        dataset = TextDataSet(data=data, sample_ids_encode_func=self.encode_sample_ids)
        dataloader = DataLoader(dataset=dataset,
                                collate_fn=self.batch_collate,
                                batch_size=batch_size,
                                shuffle=shuffle)
        return dataloader

    def sample_to_ids(self, sample):
        """Convert one raw sample into ids; return None to skip it."""
        raise NotImplementedError("子类实现 sample_to_ids 方法")

    def encode_sample_ids(self, sample_ids):
        """Encode one sample's ids (e.g. input_ids, type_ids, label_ids)."""
        raise NotImplementedError("子类实现 encode_sample_ids 方法")

    def batch_collate(self, batch):
        """Collate one batch.

        ``batch`` is a list (length = batch size) whose items are the
        values returned by ``encode_sample_ids``.
        """
        raise NotImplementedError("子类实现 batch_collate 方法")
