#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File    :   seq2seq_model.py
@Time    :   2021-11-04 18:08:15
@Author  :   GuoLiuFang
@Version :   0.1
@Contact :   909104374@qq.com
@License :   (C)Copyright 2018-2021, RandomMatrix
@Desc    :   None
'''
import logging
from typing import Text

from torch.nn.modules import dropout
# Log everything (DEBUG and up) simultaneously to ./log.log and to the console.
file_handler = logging.FileHandler(filename='log.log')
stdout_handler = logging.StreamHandler()
logging.basicConfig(
    level=logging.DEBUG,
    handlers=[file_handler, stdout_handler],
    format='%(asctime)s - %(processName)s - %(name)s - %(relativeCreated)d - %(threadName)s - %(levelname)s -- %(message)s'
)

# import other libs
# %%
import torch
import torch.nn as nn
import torch.optim as optim
from torchtext.legacy.datasets import Multi30k
from torchtext.legacy.data import Field, BucketIterator
import numpy as np
import spacy
import random
from torch.utils.tensorboard import SummaryWriter

# %%
# spaCy pipelines, used below purely for their tokenizers
# (German is the source language, English the target).
spacy_ger = spacy.load('de_core_news_sm')
spacy_eng = spacy.load('en_core_web_sm')

def tokenizer_ger(text):
    """Split a German string into a list of token texts using spaCy."""
    tokens = spacy_ger.tokenizer(text)
    return [token.text for token in tokens]

def tokenizer_eng(text):
    """Split an English string into a list of token texts using spaCy."""
    tokens = spacy_eng.tokenizer(text)
    return [token.text for token in tokens]

# Fields define tokenisation/numericalisation; both sides are lower-cased
# and wrapped in <sos>/<eos> markers.
german = Field(tokenize=tokenizer_ger, lower=True, init_token='<sos>', eos_token='<eos>')
english = Field(tokenize=tokenizer_eng, lower=True, init_token='<sos>', eos_token='<eos>')
# %%
# NOTE(review): `tain_data` looks like a typo for `train_data`; kept as-is
# since the module-level name may be referenced elsewhere.
tain_data, validation_data, test_data = Multi30k.splits(exts=('.de', '.en'), fields=(german, english))
# %%
# Vocabularies are built from the training split only; tokens occurring
# fewer than 2 times map to <unk>.
german.build_vocab(tain_data, min_freq=2)
english.build_vocab(tain_data, min_freq=2)

# %%
class Encoder(nn.Module):
    """Encode a source token sequence into final LSTM hidden/cell states.

    Args:
        input_dim: source vocabulary size.
        emb_dim: embedding dimensionality.
        hid_dim: LSTM hidden-state size.
        num_layers: number of stacked LSTM layers.
        p: dropout probability (applied to embeddings and, when
           num_layers > 1, between LSTM layers).
    """

    def __init__(self, input_dim, emb_dim, hid_dim, num_layers, p):
        super(Encoder, self).__init__()
        self.hidden_size = hid_dim
        self.num_layers = num_layers
        self.embedding = nn.Embedding(input_dim, emb_dim)
        self.dropout = nn.Dropout(p)
        self.rnn = nn.LSTM(emb_dim, hid_dim, num_layers, dropout=p)

    def forward(self, x):
        """Run the encoder over a batch of source sequences.

        Args:
            x: (seq_length, N) tensor of token indices.

        Returns:
            hidden, cell: each (num_layers * num_directions, N, hid_dim) —
            the final state of every LSTM layer, used to initialise the
            decoder. The per-time-step outputs are deliberately discarded.
        """
        # (seq_length, N) -> (seq_length, N, emb_dim), with dropout on
        # the embedded tokens.
        embedded = self.dropout(self.embedding(x))
        _, (hidden, cell) = self.rnn(embedded)
        return hidden, cell
class Decoder(nn.Module):
    """Decode one target token per call, conditioned on LSTM states.

    Args:
        output_size: target vocabulary size (embedding rows and the final
            projection's output dimension).
        embedding_size: embedding dimensionality.
        hidden_size: LSTM hidden-state size (must match the encoder's).
        num_layers: number of stacked LSTM layers (must match the encoder's).
        p: dropout probability applied to the embedded input token.
    """

    def __init__(self, output_size, embedding_size, hidden_size, num_layers, p):
        super(Decoder, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.output_size = output_size

        self.dropout = nn.Dropout(p)
        self.embedding = nn.Embedding(output_size, embedding_size)
        self.rnn = nn.LSTM(embedding_size, hidden_size, num_layers)
        # Projects the top-layer hidden state to vocabulary logits.
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, input, hidden, cell):
        """Run a single decoding step.

        Args:
            input: (N,) token indices for the current time-step.
            hidden, cell: (num_layers, N, hidden_size) LSTM states.

        Returns:
            prediction: (N, output_size) vocabulary logits.
            hidden, cell: the UPDATED LSTM states to feed the next step.
        """
        # Add the time dimension: (N,) -> (1, N).
        input = input.unsqueeze(0)
        # (1, N, embedding_size)
        embedded = self.dropout(self.embedding(input))
        # BUGFIX: reassign hidden/cell here. The original bound the new
        # states to (h, c) but returned the *incoming* hidden/cell, so the
        # decoder state never advanced between steps.
        outputs, (hidden, cell) = self.rnn(embedded, (hidden, cell))
        # outputs: (1, N, hidden_size) -> drop the time dimension.
        # BUGFIX: was `outputs.suqueeze(0)` (typo -> AttributeError).
        outputs = outputs.squeeze(0)
        prediction = self.fc(outputs)
        return prediction, hidden, cell
class Seq2Seq(nn.Module):
    """Wire an encoder and a decoder into a full sequence-to-sequence model."""

    def __init__(self, encoder, decoder):
        super(Seq2Seq, self).__init__()
        self.encoder = encoder
        self.decoder = decoder

    def forward(self, src, trg, teacher_forcing_ratio=0.5):
        """Translate a batch of source sequences, guided by the targets.

        Args:
            src: (src_len, N) source token indices.
            trg: (trg_len, N) target token indices; trg[0] is <sos>.
            teacher_forcing_ratio: probability of feeding the ground-truth
                token (rather than the model's own prediction) at each step.

        Returns:
            (trg_len, N, trg_vocab_size) logits; position 0 stays zero
            because decoding starts from the <sos> row.
        """
        trg_len = trg.shape[0]
        batch_size = trg.shape[1]
        # BUGFIX: was `self.decoder.output_sizee` (typo -> AttributeError).
        trg_vocab_size = self.decoder.output_size
        # The encoder's final states initialise the decoder.
        hidden, cell = self.encoder(src)
        # Per-step logits; allocated on the inputs' device so the model
        # also works on GPU.
        outputs = torch.zeros(trg_len, batch_size, trg_vocab_size, device=trg.device)
        # First decoder input is the <sos> row of the target batch.
        input = trg[0, :]
        for t in range(1, trg_len):
            # Decode one token for the whole batch.
            predictions, hidden, cell = self.decoder(input, hidden, cell)
            outputs[t] = predictions
            best_guess = predictions.argmax(1)
            # Teacher forcing: with probability teacher_forcing_ratio feed
            # the ground-truth token, otherwise the model's own guess.
            if random.random() > teacher_forcing_ratio:
                input = best_guess
            else:
                input = trg[t]
        return outputs


# %%
# Hyperparameters and model assembly.
# BUGFIX: the original called Encoder with missing arguments, never built
# `dec` or defined `INPUT_DIM`/`device`, and passed a third `device`
# argument that Seq2Seq.__init__ does not accept.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
INPUT_DIM = len(german.vocab)
OUTPUT_DIM = len(english.vocab)
ENC_EMB_DIM = 300
DEC_EMB_DIM = 300
HID_DIM = 1024
N_LAYERS = 2
ENC_DROPOUT = 0.5
DEC_DROPOUT = 0.5

enc = Encoder(INPUT_DIM, ENC_EMB_DIM, HID_DIM, N_LAYERS, ENC_DROPOUT)
dec = Decoder(OUTPUT_DIM, DEC_EMB_DIM, HID_DIM, N_LAYERS, DEC_DROPOUT)
model = Seq2Seq(enc, dec).to(device)