#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File    :   fanyi_seq2seq.py
@Time    :   2021-11-05 15:50:07
@Author  :   GuoLiuFang
@Version :   0.1
@Contact :   909104374@qq.com
@License :   (C)Copyright 2018-2021, RandomMatrix
@Desc    :   None
'''
import enum
import logging
from paddle import device
from paddle.fluid.dataloader import batch_sampler
from paddle.fluid.layers.nn import pad
# Log everything (DEBUG and up) to both a file ('log.log') and the console.
file_handler = logging.FileHandler(filename='log.log')
stdout_handler = logging.StreamHandler()
logging.basicConfig(
    level=logging.DEBUG,
    handlers=[file_handler, stdout_handler],
    format='%(asctime)s - %(processName)s - %(name)s - %(relativeCreated)d - %(threadName)s - %(levelname)s -- %(message)s'
)

# import other libs
"""
它能够将一个任意长度的源序列，转换成，另一个任意序列长度的目标序列。
    编码阶段：将整个，源序列，编码成一个向量。
    解码阶段：通过最大化，预测，序列概率，从中，解码出，整个目标序列。
    编码和解码的过程，通常都是使用RNN实现。

    # Encoder采用LSTM，
    # Decoder采用带有attention机制的LSTM
"""
# %%
import paddlenlp
paddlenlp.__version__

# %%
import io
import os

from functools import partial

import numpy as np

import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddlenlp.data import Vocab, Pad, vocab
from paddlenlp.metrics import Perplexity
from paddlenlp.datasets import load_dataset
# %%
# Load the 'couplet' dataset: pairs of first/second couplet lines.
train_ds, test_ds = load_dataset('couplet', splits=['train', 'test'])

# %%
print(len(train_ds), len(test_ds))
# %%
# Peek at a few raw examples to see the field names ('first'/'second').
for i in range(5):
    print(train_ds[i])
# %%
# Build the shared source/target vocabulary from the dataset's vocab metadata.
vocab = Vocab.load_vocabulary(**train_ds.vocab_info)
# %%
trg_idx2word = vocab.idx_to_token  # id -> token table, used when decoding output
vocab_size = len(vocab)
# %%
# NOTE(review): padding reuses the EOS id (pad_id == eos_id). This appears
# deliberate — prepare_input later masks out positions equal to pad_id — but
# confirm it matches how the decoder loss is computed.
pad_id = vocab[vocab.eos_token]
bos_id = vocab[vocab.bos_token]
eos_id = vocab[vocab.eos_token]
print (pad_id, bos_id, eos_id)
# %%
def convert_example(example, vocab):
    """Convert one raw couplet example into (source_ids, target_ids).

    Each half of the couplet is a '\x02'-separated token string; both
    sequences are converted to vocabulary ids and wrapped in BOS/EOS markers.

    Args:
        example: dict with 'first' and 'second' token strings.
        vocab: vocabulary exposing `to_indices`, `bos_token`/`eos_token`
            attributes and token->id item lookup.

    Returns:
        tuple (source, target): two lists of token ids.
    """
    # The original also computed an unused local `pad_id` here; removed.
    bos_id = vocab[vocab.bos_token]
    eos_id = vocab[vocab.eos_token]
    source = [bos_id] + vocab.to_indices(example['first'].split('\x02')) + [eos_id]
    target = [bos_id] + vocab.to_indices(example['second'].split('\x02')) + [eos_id]
    return source, target
# Apply the id-conversion to every example eagerly (lazy=False maps up front).
trans_func = partial(convert_example, vocab=vocab)
train_ds = train_ds.map(trans_func, lazy=False)
test_ds = test_ds.map(trans_func, lazy=False)

# %%
# paddle.io.DataLoader yields mini-batches from the dataset in the order the
# (default) batch sampler chooses. `collate_fn` decides how a list of samples
# becomes one batch — here `prepare_input` pads every sequence to a common
# length and also returns the real (unpadded) lengths.
def create_data_loader(dataset):
    """Wrap `dataset` in a DataLoader that pads each mini-batch.

    Relies on the module-level `batch_size`, `pad_id` and `prepare_input`.
    """
    collate = partial(prepare_input, pad_id=pad_id)
    loader = paddle.io.DataLoader(
        dataset,
        batch_sampler=None,
        batch_size=batch_size,
        collate_fn=collate,
    )
    return loader

def prepare_input(insts, pad_id):
    """Collate a list of (source_ids, target_ids) pairs into batch arrays.

    Returns:
        src: padded source batch.
        src_length: true source lengths.
        decoder input: target with the last token dropped (tgt[:, :-1]).
        labels: target with the first token dropped, plus a trailing axis.
        tgt_mask: float mask, 0 where the decoder input is padding.
    """
    padder = Pad(pad_val=pad_id, ret_length=True)
    src, src_length = padder([pair[0] for pair in insts])
    tgt, tgt_length = padder([pair[1] for pair in insts])
    dec_input = tgt[:, :-1]
    labels = tgt[:, 1:, np.newaxis]
    tgt_mask = (dec_input != pad_id).astype(paddle.get_default_dtype())
    return src, src_length, dec_input, labels, tgt_mask
# %%
device = "cpu"
device = paddle.set_device(device)

batch_size = 128
num_layers = 2
dropout = 0.2
hidden_size = 256
max_grad_norm = 5.0
learning_rate = 0.001
max_epoch = 20
model_path = './couplet_models'
log_freq = 200

# Define dataloader
train_loader = create_data_loader(train_ds)
test_loader = create_data_loader(test_ds)

print(len(train_ds), len(train_loader), batch_size)
# %%
# Sanity check: print the number of fields in one collated batch and the
# shape of each field (src, src_length, decoder input, labels, mask).
for i in train_loader:
    print(len(i))
    for ind, each in enumerate(i):
        print(ind, each.shape)
    break
# %%
# 定义Encoder
# Encoder definition
class Seq2SeqEncoder(nn.Layer):
    """LSTM encoder: embeds source token ids, then encodes with a stacked LSTM.

    Args:
        vocab_size: size of the source vocabulary (embedding rows).
        embed_dim: embedding width / LSTM input size.
        hidden_size: LSTM hidden state width.
        num_layers: number of stacked LSTM layers.
        dropout: dropout rate applied between stacked layers. Previously this
            was hard-coded to 0.2, duplicating the module-level ``dropout``
            constant; it is now a backward-compatible parameter with the same
            default.
    """
    def __init__(self, vocab_size, embed_dim, hidden_size, num_layers,
                 dropout=0.2):
        super(Seq2SeqEncoder, self).__init__()
        self.embedder = nn.Embedding(vocab_size, embed_dim)
        self.lstm = nn.LSTM(
            input_size=embed_dim,
            hidden_size=hidden_size,
            num_layers=num_layers,
            # Inter-layer dropout is meaningless with a single layer.
            dropout=dropout if num_layers > 1 else 0.
        )

    def forward(self, sequence, sequence_length):
        """Encode a padded batch of token ids.

        Args:
            sequence: int tensor of token ids, one row per sample.
            sequence_length: true (unpadded) length of each row.

        Returns:
            encoder_output: per-timestep hidden states,
                shape [batch_size, time_steps, hidden_size].
            encoder_state: tuple of final (h, c) states, each of shape
                [num_layers * num_directions, batch_size, hidden_size].
        """
        # One embedding vector per token position.
        inputs = self.embedder(sequence)
        encoder_output, encoder_state = self.lstm(
            inputs,
            sequence_length=sequence_length
        )
        return encoder_output, encoder_state
# %%
class AttentionLayer(nn.Layer):
    """Multiplicative (Luong-style) attention over encoder outputs.

    Projects the encoder outputs, scores them against the decoder hidden
    state, and returns a projected concatenation of the attention context
    and the hidden state.
    """
    def __init__(self, hidden_size):
        # BUG FIX: the original read `super.__init__(AttentionLayer, self).__init__()`,
        # which raises TypeError at construction time. Correct super() call below.
        super(AttentionLayer, self).__init__()
        self.input_proj = nn.Linear(hidden_size, hidden_size)
        # Takes [context ; hidden] concatenated along the feature axis.
        self.output_proj = nn.Linear(hidden_size + hidden_size, hidden_size)

    def forward(self, hidden, encoder_output, encoder_padding_mask):
        """Compute the attention output for one decoder step.

        Args:
            hidden: decoder hidden state to attend with.
            encoder_output: encoder states, [batch, src_len, hidden_size].
            encoder_padding_mask: additive mask (large negative at padded
                positions) or None.

        Returns:
            attn_out: projected [context ; hidden], last dim hidden_size.
        """
        # NOTE(review): leftover debug print() calls were removed from this
        # method; shapes in the original comments came from those debug runs.
        encoder_output = self.input_proj(encoder_output)
        # Dot-product scores between the hidden state and every encoder step.
        attn_scores = paddle.matmul(
            paddle.unsqueeze(hidden, [1]),  # add a query-length axis of 1
            encoder_output,
            transpose_y=True
        )
        # Additive masking pushes padded positions toward -inf before softmax.
        if encoder_padding_mask is not None:
            attn_scores = paddle.add(attn_scores, encoder_padding_mask)
        attn_scores = F.softmax(attn_scores)
        # Weighted sum of encoder states; drop the singleton query axis.
        attn_out = paddle.squeeze(
            paddle.matmul(attn_scores, encoder_output),
            [1]
        )
        attn_out = paddle.concat([attn_out, hidden], 1)
        attn_out = self.output_proj(attn_out)
        return attn_out