#! -*- coding:utf-8 -*-

import json
import numpy as np
import torch
from torch.utils.data.dataloader import DataLoader
from functools import partial

from .abc_dataset import AbstractDataset


def seq_padding(X):
    """Right-pad every sequence in *X* with 0 to the length of the longest one.

    :param X: list of lists (e.g. token-id sequences).
    :return: list of lists, all of equal length; ``[]`` when *X* is empty
        (the original ``max(L)`` raised ``ValueError`` on an empty batch).
    """
    # default=0 makes the empty-batch case return [] instead of crashing.
    max_len = max((len(x) for x in X), default=0)
    return [x + [0] * (max_len - len(x)) for x in X]


def seq_padding_vec(X):
    """Right-pad every sequence of 2-d vectors in *X* with ``[1, 0]`` entries.

    :param X: list of lists of 2-element lists (one-hot-style vectors).
    :return: list of lists, all of equal length; ``[]`` when *X* is empty.

    Bug fixed: the original ``[[1, 0]] * n`` padding replicated references to
    a SINGLE inner list, so mutating one padded vector silently mutated every
    pad in that row. Each pad is now a fresh list. Also handles an empty batch
    instead of raising ``ValueError`` from ``max()``.
    """
    max_len = max((len(x) for x in X), default=0)
    # Build a fresh [1, 0] per padded position — no shared inner lists.
    return [x + [[1, 0] for _ in range(max_len - len(x))] for x in X]


def sort_all(batch, lens):
    """ Sort all fields by descending order of lens, and return the original indices. """
    # Permutation of positions, longest first; ties on length are broken by
    # the larger original index (this mirrors the reverse tuple-sort the
    # previous zip-based implementation performed).
    order = sorted(range(len(lens)), key=lambda i: (lens[i], i), reverse=True)
    reordered = [[field[i] for i in order] for field in batch]
    return reordered, order


class Seq2UmtDataset(AbstractDataset):
    """Dataset that tokenizes raw sentences into padded token-id sequences.

    Sentences come either from a JSON-lines file (``dataset``, one JSON
    object per line with a ``"text"`` key) or directly from
    ``sentence_list``. Exactly one of the two must be provided.
    """

    def __init__(self, hyper, dataset=None, sentence_list=None):
        super(Seq2UmtDataset, self).__init__(hyper, dataset, sentence_list)

        self.text_list = []  # tokenized sentences (lists of tokens)
        self.T = []  # token-id rows; becomes a padded np.ndarray below

        if dataset is not None:
            # 'with' closes the file deterministically (the original handle
            # was never closed).
            with open(dataset, "r", encoding="utf-8") as line_file:
                s_lines = [json.loads(instance)["text"] for instance in line_file]
        elif sentence_list is not None:
            s_lines = sentence_list
        else:
            # Raising a bare string is a TypeError in Python 3; raise a real
            # exception instead (message: "please provide sentences to process").
            raise ValueError("请输入要处理的句子")

        for line in s_lines:
            line = line.strip("\n")
            # Tokenize once and reuse (the original called the tokenizer twice).
            tokens = self.hyper.tokenizer(line)
            text_id = [
                self.word_vocab.get(c, self.word_vocab["<oov>"]) for c in tokens
            ]
            # Explicit raise instead of assert: asserts vanish under 'python -O'.
            if not text_id:
                raise ValueError("sentence produced no tokens: %r" % line)
            self.T.append(text_id)
            self.text_list.append(tokens)
        self.T = np.array(seq_padding(self.T))

    def __getitem__(self, index):
        """Return (padded token ids, tokens, original token count) for one sample."""
        return self.T[index], self.text_list[index], len(self.text_list[index])

    def __len__(self):
        return len(self.text_list)


class BatchReader(object):
    """Collated batch whose samples are sorted by descending sequence length.

    ``data`` is a list of ``(token_ids, tokens, length)`` samples as produced
    by ``Seq2UmtDataset.__getitem__``.
    """

    def __init__(self, data):
        # Transpose list-of-samples into parallel per-field lists.
        transposed_data = list(zip(*data))

        lens = transposed_data[2]
        transposed_data, orig_idx = sort_all(transposed_data, lens)
        self.orig_idx = orig_idx  # permutation to restore the input order
        self.T = torch.LongTensor(np.array(transposed_data[0]))
        self.tokens_id = self.T  # alias kept for callers that use this name
        self.text = transposed_data[1]
        self.length = transposed_data[2]

    def pin_memory(self):
        # pin_memory() returns a NEW pinned tensor; re-point the alias too,
        # otherwise self.tokens_id would keep referencing the stale unpinned
        # tensor (bug in the original).
        self.T = self.T.pin_memory()
        self.tokens_id = self.T
        return self


def collate_fn(batch):
    """Collate a list of dataset samples into a single BatchReader."""
    reader = BatchReader(batch)
    return reader


# DataLoader factory pre-bound to the custom collate_fn (BatchReader batches)
# and pinned host memory for faster host-to-GPU transfers; remaining DataLoader
# arguments (dataset, batch_size, ...) are supplied by the caller.
Seq2umt_loader = partial(DataLoader, collate_fn=collate_fn, pin_memory=True)
