import os

import matplotlib.pyplot as plt
import torch
from d2l import torch as d2l
from s02_data_load_handler import tokenize_nmt

# Load the parallel corpus: the raw text is split into lines on '\n', each
# line is split on '\t' into the English source and the translated target,
# and the tokens are collected into two lists of equal length.
source, target = tokenize_nmt()
print("确定训练集长度", len(source), len(target))
print("源", source[0:5])
print("目的", target[0:5])


def show_list_len_pair_hist(legend, xlabel, ylabel, xlist, ylist):
    """Plot a side-by-side histogram of sequence lengths for two token lists.

    Each entry of `xlist`/`ylist` is a token sequence; the histogram shows
    how the lengths of the two corpora are distributed.
    """
    d2l.set_figsize()
    length_pairs = [[len(seq) for seq in xlist],
                    [len(seq) for seq in ylist]]
    _, _, patches = d2l.plt.hist(length_pairs)
    d2l.plt.xlabel(xlabel)
    d2l.plt.ylabel(ylabel)
    # Hatch the bars of the second series so the two are distinguishable.
    for bar in patches[1].patches:
        bar.set_hatch('/')
    d2l.plt.legend(legend)


# Visualize the token-length distribution of source vs. target sentences.
show_list_len_pair_hist(['source', 'target'], '# tokens per sequence',
                        'count', source, target);
# plt.show()
#
# 9.5.3. Vocabulary
# Build the source vocabulary; tokens seen fewer than 2 times are dropped,
# and <pad>/<bos>/<eos> are reserved as special tokens.
src_vocab = d2l.Vocab(source, min_freq=2,
                      reserved_tokens=['<pad>', '<bos>', '<eos>'])
print("特殊词信息", len(src_vocab))
print(src_vocab['<pad>'], src_vocab['<bos>'], src_vocab['<eos>'], src_vocab.to_tokens(0), src_vocab.to_tokens(4),
      src_vocab.to_tokens(5), src_vocab.to_tokens(6), src_vocab.to_tokens(7), src_vocab.to_tokens(8),
      src_vocab.to_tokens(9), src_vocab.to_tokens(10), src_vocab.to_tokens(11), src_vocab.to_tokens(12))


# 9.5.4. 加载数据集
def truncate_pad(line, num_steps, padding_token):
    """Truncate or pad `line` so it has exactly `num_steps` tokens.

    Sequences longer than `num_steps` are cut off; shorter (or equal-length)
    sequences are extended with `padding_token` and returned as a new list.
    """
    deficit = num_steps - len(line)
    if deficit < 0:
        # Too long: keep only the first num_steps tokens.
        return line[:num_steps]
    # Too short (or exact length): append the required amount of padding.
    return line + [padding_token] * deficit


# Demo: map the first source sentence to token ids, then pad/truncate to 10.
print(truncate_pad(src_vocab[source[0]], 10, src_vocab['<pad>']))


# print(truncate_pad(src_vocab[source[0]], 10, src_vocab['<unk>']))
#
def build_array_nmt(lines, vocab, num_steps):
    """Convert machine-translation text sequences into a minibatch tensor.

    Each sequence is mapped to token ids, terminated with `<eos>`, then
    truncated/padded to `num_steps`. Returns the id tensor together with
    the per-sequence count of non-`<pad>` tokens.
    """
    eos_id = vocab['<eos>']
    pad_id = vocab['<pad>']
    token_ids = [vocab[line] + [eos_id] for line in lines]
    array = torch.tensor(
        [truncate_pad(ids, num_steps, pad_id) for ids in token_ids])
    # A position is "valid" exactly when it is not padding; <eos> counts.
    valid_len = (array != pad_id).type(torch.int32).sum(1)
    return array, valid_len


# Build the target vocabulary with the same min_freq and special tokens
# as the source vocabulary.
tgt_vocab = d2l.Vocab(target, min_freq=2,
                      reserved_tokens=['<pad>', '<bos>', '<eos>'])
print("原始词表和目的次表大小")
print(len(src_vocab))
print(len(tgt_vocab))
# print(tgt_vocab)
# Dump every token of both vocabularies in id order.
print(src_vocab.to_tokens([i for i in range(len(src_vocab))]))
print(tgt_vocab.to_tokens([i for i in range(len(tgt_vocab))]))

# Convert both corpora to fixed-length (30-step) id arrays with valid lengths.
src_array, src_valid_len = build_array_nmt(source, src_vocab, 30)
tgt_array, tgt_valid_len = build_array_nmt(target, tgt_vocab, 30)
print("分析单个告警的词的长度和有效词长度", len(src_array[10]), src_valid_len)
print("分析单个目的分类的词的长度和有效词长度", len(tgt_array[10]), tgt_valid_len)


# 9.5.5. 训练模型
def load_data_nmt(batch_size, num_steps, num_examples=600):
    """Return an iterator over the translation dataset and both vocabularies.

    Builds source/target vocabularies (min_freq=2, with <pad>/<bos>/<eos>
    reserved), converts both corpora into `num_steps`-long id arrays with
    valid lengths, and wraps everything in a minibatch iterator.

    NOTE(review): `num_examples` is never used — `tokenize_nmt()` is called
    without it, so the full dataset is always loaded; confirm intent.
    """
    raw_source, raw_target = tokenize_nmt()
    specials = ['<pad>', '<bos>', '<eos>']
    source_vocab = d2l.Vocab(raw_source, min_freq=2, reserved_tokens=specials)
    target_vocab = d2l.Vocab(raw_target, min_freq=2, reserved_tokens=specials)
    src_arr, src_len = build_array_nmt(raw_source, source_vocab, num_steps)
    tgt_arr, tgt_len = build_array_nmt(raw_target, target_vocab, num_steps)
    data_iter = d2l.load_array((src_arr, src_len, tgt_arr, tgt_len),
                               batch_size)
    return data_iter, source_vocab, target_vocab

# train_iter, src_vocab, tgt_vocab = load_data_nmt(batch_size=2, num_steps=30)
# for X, X_valid_len, Y, Y_valid_len in train_iter:
#     print('X:', X.type(torch.int32))
#     print('X的有效长度:', X_valid_len)
#     print('Y:', Y.type(torch.int32))
#     print('Y的有效长度:', Y_valid_len)
#     break
# """
# X: tensor([[ 7, 35,  4,  3,  1,  1,  1,  1],
#         [ 0,  4, 70,  5,  3,  1,  1,  1]], dtype=torch.int32)
# X的有效长度: tensor([4, 5])
# Y: tensor([[ 6,  7, 85,  4,  3,  1,  1,  1],
#         [19,  0,  5, 54,  5,  3,  1,  1]], dtype=torch.int32)
# Y的有效长度: tensor([5, 6])
# """
