import random

from torch.utils.data import Dataset, DataLoader
import numpy as np
import datetime
from bdtime import tt
from bdtime import show_ls, show_json
from d2l import torch as d2l


def get_date_time_data(n=1000, seed=None):
    """Generate `n` parallel date strings from random Unix timestamps.

    Args:
        n: number of samples to generate.
        seed: when given, seeds numpy's global RNG for reproducibility.

    Returns:
        (date_cn, date_en, res) where date_cn holds "%Y-%m-%d" strings,
        date_en holds "%d/%b/%Y" strings, and res pairs them joined by
        three spaces.

    NOTE(review): `fromtimestamp` uses the local timezone and "%b" is
    locale-dependent — results vary across machines; confirm that is OK.
    """
    if seed is not None:
        np.random.seed(seed)
    date_cn, date_en, res = [], [], []
    timestamps = np.random.randint(143835585, 2043835585, n)
    for ts in timestamps:
        dt = datetime.datetime.fromtimestamp(ts)
        src = dt.strftime("%Y-%m-%d")
        tgt = dt.strftime("%d/%b/%Y")
        date_cn.append(src)
        date_en.append(tgt)
        res.append(f"{src}   {tgt}")
    return date_cn, date_en, res


# date_cn, date_en, lines = get_date_time_data(100)
# print("====== lines ======")
# show_ls(lines[:5])


class SpecialSymbol:
    """Reserved special tokens shared by source and target vocabularies."""
    unk = "<unk>"  # d2l's Vocab adds this one by default
    pad = "<pad>"
    bos = "<bos>"
    eos = "<eos>"

    @staticmethod
    def all(add_unk=False):
        """Return [pad, bos, eos], prefixed with unk when add_unk is True."""
        base = [SpecialSymbol.pad, SpecialSymbol.bos, SpecialSymbol.eos]
        return [SpecialSymbol.unk] + base if add_unk else base


# def get_target_vocab():
#     # date_en = [line.split()[1].split("/")[1] for line in lines]
#     vocab_date_en = [line.split("/")[1] for line in date_en]
#     target_vocab = set([str(i) for i in range(0, 10)] + ["-", "/"] + vocab_date_en)
#     return target_vocab
#
#
# get_target_vocab()


def tokenize(lines, token='char'):  #@save
    """Split text lines into tokens.

    Args:
        lines: iterable of strings.
        token: tokenization granularity; only 'char' is supported here.

    Returns:
        A list with one list of characters per input line.

    Raises:
        ValueError: if `token` is not 'char'. The original printed an
        error message and implicitly returned None, which deferred the
        failure to a confusing crash at the call site.
    """
    if token == 'char':
        return [list(line) for line in lines]
    raise ValueError('错误：未知词元类型：' + token)


# tokenize(lines, token="word")
# source = tokenize(date_cn)

# src_vocab = d2l.Vocab(source, min_freq=1, reserved_tokens=SpecialSymbol.all())
# len(src_vocab)
# src_vocab.idx_to_token
# src_vocab.token_to_idx


# i = 0
# for x, y in enumerate(src_vocab):
#     print(x, "---", y)
#     i += 1
#     if i >= 10:
#         break
# exit()


def truncate_pad(line, num_steps, padding_token):
    """Clip `line` to `num_steps` tokens, right-padding with `padding_token`."""
    clipped = line[:num_steps]
    return clipped + [padding_token] * (num_steps - len(clipped))


# truncate_pad(src_vocab[source[0]], 10, src_vocab['<pad>'])
# "".join(source[0])


import torch
def build_array_nmt(lines, vocab, num_steps):
    """将机器翻译的文本序列转换成小批量"""
    # "".join(lines[0])
    lines = [vocab[l] for l in lines]
    lines = [l + [vocab['<eos>']] for l in lines]
    array = torch.tensor([truncate_pad(l, num_steps, vocab['<pad>']) for l in lines])
    valid_len = (array != vocab['<pad>']).type(torch.int32).sum(1)
    return array, valid_len


# num_steps = 10
# build_array_nmt(source, src_vocab, num_steps)[0]

def get_attention_mask_by_valid_len(valid_len, num_steps):
    """Build a 1/0 attention mask per sequence: 1 marks a valid position."""
    mask = []
    for length in valid_len:
        mask.append([1] * length + [0] * (num_steps - length))
    return mask


date_cn, date_en, lines = get_date_time_data(1000)


def load_data_nmt(batch_size, num_steps, total_data=0):
    """Return the translation dataset iterator and both vocabularies.

    Args:
        batch_size: minibatch size for the returned iterator.
        num_steps: fixed sequence length (sequences are truncated/padded).
        total_data: when non-zero, regenerate the module-level dataset
            with this many samples (mutates the module globals).

    Returns:
        (data_iter, src_vocab, tgt_vocab)
    """
    global date_cn, date_en, lines
    if total_data:
        date_cn, date_en, lines = get_date_time_data(total_data)
    source = tokenize(date_cn)
    target = tokenize(date_en)
    src_vocab = d2l.Vocab(source, min_freq=2,
                          reserved_tokens=SpecialSymbol.all())
    tgt_vocab = d2l.Vocab(target, min_freq=2,
                          reserved_tokens=SpecialSymbol.all())

    src_array, src_valid_len = build_array_nmt(source, src_vocab, num_steps)
    tgt_array, tgt_valid_len = build_array_nmt(target, tgt_vocab, num_steps)
    data_iter = d2l.load_array(
        (src_array, src_valid_len, tgt_array, tgt_valid_len), batch_size)
    return data_iter, src_vocab, tgt_vocab


def conv_idx_to_tokens(idx, vocab, concat_symbol=None, skip_spacial_symbols=None):
    """Convert token indices back into token strings via `vocab.to_tokens`.

    Accepts several index layouts: a list of ints, a list of lists, a list
    of 1-D/2-D tensors, or a 1-D/2-D tensor. Always returns a list of
    per-sequence results (list of token lists, or list of joined strings
    when `concat_symbol` is given).

    Args:
        idx: token indices in one of the layouts above.
        vocab: vocabulary exposing `to_tokens` (e.g. d2l.Vocab).
        concat_symbol: when not None, join each token sequence into one
            string with this separator.
        skip_spacial_symbols: tokens to drop before joining; NOTE it is
            only applied when `concat_symbol` is given.
    """
    if not skip_spacial_symbols:
        skip_spacial_symbols = []
    # Empty input (empty tensor or empty/falsy list) -> empty result.
    if not (idx.numel() if isinstance(idx, torch.Tensor) else idx):
        res = []
    elif isinstance(idx, list):
        if isinstance(idx[0], torch.Tensor):
            # _idx = torch.stack(idx)
            # assert idx[0].dim() == 1 or idx[0].shape[0] == 1, '2-D tensors inside the list must have batch_size dim 1!'
            if idx[0].dim() == 2:
                # Each tensor is (1, seq): drop the batch dimension.
                _idx = [_i.cpu().numpy().tolist()[0] for _i in idx]
            else:
                _idx = [_i.cpu().numpy().tolist() for _i in idx]
            res = [vocab.to_tokens(_i) for _i in _idx]
        elif isinstance(idx[0], list):
            res = [vocab.to_tokens(_i) for _i in idx]
        else:
            # Flat list of ints: treat as a single sequence.
            res = [vocab.to_tokens(idx)]
    elif isinstance(idx, torch.Tensor):
        assert idx.dim() <= 2, "idx必须为小于2维的Tensor"
        if idx.dim() == 1:
            res = [vocab.to_tokens(idx.tolist())]
        else:
            # 2-D tensor: one sequence per row.
            res = [vocab.to_tokens([int(i) for i in id_i.cpu().tolist()]) for id_i in idx]
    else:
        raise TypeError(f'idx类型[{type(idx)}]错误!')

    if concat_symbol is not None:
        if skip_spacial_symbols:
            res = [[i for i in r if i not  in skip_spacial_symbols] for r in res]
        res = [f"{concat_symbol}".join(r) for r in res]
    return res


if __name__ == '__main__':
    # Smoke test: regenerate a small reproducible dataset, then iterate
    # one epoch printing batch shapes.
    date_cn, date_en, lines = get_date_time_data(100, seed=1)

    train_iter, src_vocab, tgt_vocab = load_data_nmt(batch_size=100, num_steps=20)
    for batch_no, (X, X_valid_len, Y, Y_valid_len) in enumerate(train_iter, start=1):
        print(batch_no, X.shape, Y.shape)


