# coding=utf-8
# Copyright (C) xxx team - All Rights Reserved
#
# @Version:   3.9.4
# @Software:  PyCharm
# @FileName:  datamodule.py
# @CTime:     2021/5/3 21:39   
# @Author:    Haiyang Yu
# @Email:     xxx
# @UTime:     2021/5/3 21:39
#
# @Description:
#     xxx
#     xxx
#
import os
import json
import codecs
import logging
from typing import List, Dict, Optional, Union
import torch
from torch.utils.data import DataLoader
import pytorch_lightning as pl

logger = logging.getLogger(__name__)


class DataModule(pl.LightningDataModule):
    """Lightning data module for JSON-lines token-classification datasets.

    Expects ``train.txt`` / ``valid.txt`` / ``test.txt`` under
    ``cfg.cwd / cfg.data_dir``, one JSON object per line. Each object must
    carry at least ``ids`` (token ids) and ``tags`` (label ids) — assumed
    from ``_collate_fn``'s key accesses; confirm against the preprocessing
    step that writes these files.

    Args:
        cfg: configuration object providing ``cwd``, ``data_dir``,
            ``batch_size``, ``bert_sent_max_len``, ``num_workers``,
            ``pin_memory`` and ``continue_test_dataset``.
    """

    def __init__(self, cfg):
        super(DataModule, self).__init__()
        # config
        self.cwd = cfg.cwd
        self.data_dir = cfg.data_dir
        self.batch_size = cfg.batch_size
        self._bert_sent_max_len = cfg.bert_sent_max_len
        self._num_workers = cfg.num_workers
        self._pin_memory = cfg.pin_memory
        self._continue_test_dataset = cfg.continue_test_dataset

    def _read_jsonl(self, filename: str) -> List[Dict]:
        """Read one JSON object per line from ``cwd/data_dir/filename``.

        Uses a ``with`` block so the file handle is closed immediately
        (the previous implementation kept the handles open for the whole
        module lifetime — a resource leak).
        """
        path = os.path.join(self.cwd, self.data_dir, filename)
        with codecs.open(path, 'r', encoding='utf-8') as f:
            return [json.loads(line) for line in f]

    def setup(self, stage: Optional[str] = None):
        """Load the dataset splits required for *stage* into memory."""
        if stage == 'fit' or stage is None:
            self.train = self._read_jsonl('train.txt')
            self.valid = self._read_jsonl('valid.txt')
        # `continue_test_dataset` forces the test split to be available
        # even when Lightning only requested the 'fit' stage.
        if stage == 'test' or self._continue_test_dataset or stage is None:
            self.test = self._read_jsonl('test.txt')

    def _collate_fn(self, batch):
        """Pad/truncate a batch of samples to a common length.

        Samples are sorted longest-first; the batch length is the longest
        sample capped at ``bert_sent_max_len``. Returns a tuple of
        ``(ids, attention_mask, tags)`` LongTensors, each of shape
        ``(batch, max_len)``, zero-padded on the right.
        """
        batch = sorted(batch, key=lambda b: len(b['ids']), reverse=True)
        max_len = min(len(batch[0]['ids']), self._bert_sent_max_len)

        x, x_mask, x_tag = [], [], []
        for sample in batch:
            # Explicit truncated length instead of relying on a negative
            # multiplier producing an empty padding list.
            keep = min(len(sample['ids']), max_len)
            pad = [0] * (max_len - keep)
            x.append(sample['ids'][:keep] + pad)
            x_mask.append([1] * keep + pad)
            x_tag.append(sample['tags'][:keep] + pad)

        return torch.tensor(x), torch.tensor(x_mask), torch.tensor(x_tag)

    def _make_loader(self, dataset, shuffle: bool) -> DataLoader:
        """Build a DataLoader with the module-wide loading options."""
        return DataLoader(dataset,
                          shuffle=shuffle,
                          batch_size=self.batch_size,
                          num_workers=self._num_workers,
                          pin_memory=self._pin_memory,
                          collate_fn=self._collate_fn)

    def train_dataloader(self) -> DataLoader:
        return self._make_loader(self.train, shuffle=True)

    def val_dataloader(self) -> Union[DataLoader, List[DataLoader]]:
        return self._make_loader(self.valid, shuffle=False)

    def test_dataloader(self) -> Union[DataLoader, List[DataLoader]]:
        return self._make_loader(self.test, shuffle=False)



if __name__ == '__main__':
    # Module is intended to be imported by the training pipeline;
    # there is deliberately no standalone CLI behavior.
    pass
