import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import jieba
from collections import Counter
from torch.utils.data import Dataset, DataLoader
import re

# Model hyperparameters.
emb_dim = 128      # word-embedding dimension
hidden_size = 128  # LSTM hidden-state size
num_layers = 2     # stacked LSTM layers
batch_size = 2     # training batch size (unused in this inference script — TODO confirm)

# Full corpus (disabled): load Q/A pairs from .npy dumps instead of the toy data below.
# input_sentence = np.load("./dialog/input_simpleQA.npy").tolist()
# output_sentence = np.load("./dialog/output_simpleQA.npy").tolist()

input_sentence = ["你好啊", "你多大了", "我想去吃午饭",'你给我讲个故事']
output_sentence = ["我很好", "我今年18岁了", "正好，我们一起去",'从前有座山，山上有座庙，庙里有两个和尚']

# Fixed sequence length: longest sentence plus 2 for the <SOS>/<EOS> markers.
# NOTE(review): lengths here are counted in characters, while sequences are later
# tokenized with jieba (word-level), so this is an upper bound on the token count.
n_step1 = max([len(i) for i in input_sentence])
n_step2 = max([len(i) for i in output_sentence])
n_step = max(n_step1, n_step2) + 2

# Vocabulary construction
def buildDic(texts, max_vocab=9999):
    """Build a word vocabulary from nested lists of sentences.

    Args:
        texts: list of sentence lists (e.g. [input_sentence, output_sentence]).
            Elements that are not lists are silently skipped, as before.
        max_vocab: keep at most this many distinct tokens, most frequent first,
            before the four special tokens are prepended (previously the
            hard-coded constant 9999).

    Returns:
        (word_vacab, word2idx): the token list and its token -> index mapping.
        Special tokens occupy fixed indices: <SOS>=0, <UNK>=1, <PAD>=2, <EOS>=3.
    """
    tokens = []
    for text in texts:
        if isinstance(text, list):
            for t in text:
                # Keep only CJK characters, alphanumerics and common Chinese punctuation.
                t = re.sub(r'[^\u4e00-\u9fa5a-zA-Z0-9，。！？]', '', t)
                tokens.extend(jieba.cut(t))
    # Most frequent tokens first, capped at max_vocab.
    word_vacab = [w for w, _ in Counter(tokens).most_common(max_vocab)]
    # Prepend the special tokens in their fixed order; the preprocessing code
    # relies on these exact indices.
    word_vacab = ["<SOS>", "<UNK>", "<PAD>", "<EOS>"] + word_vacab

    word2idx = {word: idx for idx, word in enumerate(word_vacab)}

    return word_vacab, word2idx

# Build the vocabulary from both the question and the answer corpora.
word_vacab, word2idx = buildDic([input_sentence, output_sentence])

# Derived hyperparameter: vocabulary size = embedding/output dimension of the model.
input_size = len(word_vacab)

def preprocess_sentence(sentence, word2idx, n_step):
    """Convert one sentence to a fixed-length list of vocabulary indices.

    The sentence is cleaned, tokenized with jieba, wrapped in <SOS>/<EOS>,
    then padded with <PAD> — or truncated — to exactly n_step entries.

    Args:
        sentence: raw text.
        word2idx: token -> index map produced by buildDic.
        n_step: target sequence length (must be >= 2 so the markers fit).

    Returns:
        list[int] of length n_step.
    """
    # Strip everything except CJK characters, alphanumerics and common punctuation.
    sentence = re.sub(r'[^\u4e00-\u9fa5a-zA-Z0-9，。！？]', '', sentence)
    words = jieba.cut(sentence)
    # Look the special tokens up by name instead of hard-coding 0/3/2.
    sos, eos, pad, unk = (word2idx["<SOS>"], word2idx["<EOS>"],
                          word2idx["<PAD>"], word2idx["<UNK>"])
    array = [sos] + [word2idx.get(w, unk) for w in words] + [eos]
    # Bug fix: sequences longer than n_step previously stayed over-length,
    # producing ragged lists that break torch.Tensor() in make_data.
    # Truncate while keeping the trailing <EOS> marker.
    if len(array) > n_step:
        array = array[:n_step - 1] + [eos]
    # Pad short sequences up to the fixed length.
    if len(array) < n_step:
        array += [pad] * (n_step - len(array))
    return array


def make_data(input_sentence, output_sentence):
    """Vectorize paired question/answer sentence lists into LongTensors.

    Args:
        input_sentence: list of source sentences.
        output_sentence: list of target sentences (paired; zip semantics, so
            extra elements in the longer list are ignored, as before).

    Returns:
        (input_tensor, output_tensor): LongTensors of shape
        (num_pairs, n_step), built with the module-level word2idx / n_step.
    """
    input_array, output_array = [], []
    for input_sent, output_sent in zip(input_sentence, output_sentence):
        input_array.append(preprocess_sentence(input_sent, word2idx, n_step))
        output_array.append(preprocess_sentence(output_sent, word2idx, n_step))
    # torch.tensor with an explicit dtype avoids the float round-trip of
    # torch.Tensor(...).long().
    return (torch.tensor(input_array, dtype=torch.long),
            torch.tensor(output_array, dtype=torch.long))

class Encoder(nn.Module):
    """Embedding + multi-layer LSTM encoder for the source sequence."""

    def __init__(self, input_size, hidden_size, emb_dim, num_layers, padding_idx=2):
        """
        Args:
            input_size: vocabulary size.
            hidden_size: LSTM hidden dimension.
            emb_dim: embedding dimension.
            num_layers: number of stacked LSTM layers.
            padding_idx: embedding index whose vector is kept at zero.
                Defaults to 2, which is where buildDic always places <PAD>;
                previously this was read from the module-level word2idx.
        """
        super(Encoder, self).__init__()
        self.embedding = nn.Embedding(input_size, emb_dim, padding_idx=padding_idx)
        self.lstm = nn.LSTM(emb_dim, hidden_size, num_layers, batch_first=True)

    def forward(self, x, h, c):
        """Encode x of shape (batch, seq) -> (outputs, hidden, cell)."""
        x = self.embedding(x)
        out, (hidden, cell) = self.lstm(x, (h, c))
        return out, hidden, cell


class Decoder(nn.Module):
    """Embedding + multi-layer LSTM decoder with a vocabulary-sized projection."""

    def __init__(self, input_size, hidden_size, emb_dim, num_layers, padding_idx=2):
        """
        Args:
            input_size: vocabulary size (also the output dimension of `fc`).
            hidden_size: LSTM hidden dimension.
            emb_dim: embedding dimension.
            num_layers: number of stacked LSTM layers.
            padding_idx: embedding index whose vector is kept at zero.
                Defaults to 2, which is where buildDic always places <PAD>;
                previously this was read from the module-level word2idx.
        """
        super(Decoder, self).__init__()
        self.embedding = nn.Embedding(input_size, emb_dim, padding_idx=padding_idx)
        self.lstm = nn.LSTM(emb_dim, hidden_size, num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_size, input_size)

    def forward(self, input, h, c):
        """Decode one (or more) steps: (batch, seq) -> (logits, hidden, cell)."""
        embedded = self.embedding(input)
        out, (hidden, cell) = self.lstm(embedded, (h, c))
        out = self.fc(out)  # (batch, seq, vocab) logits
        return out, hidden, cell


class Seq2Seq(nn.Module):
    """Encoder-decoder wrapper with per-step scheduled teacher forcing."""

    def __init__(self, encoder, decoder):
        # `decoder` must expose `fc` (its output projection) — see forward().
        super(Seq2Seq, self).__init__()
        self.encoder = encoder
        self.decoder = decoder

    def forward(self, input_seq, target_seq, h, c, teacher_forcing_ratio=0.5):
        """Run one full decode over the target length.

        Args:
            input_seq: (batch, src_len) LongTensor of source indices.
            target_seq: (batch, tgt_len) LongTensor; position 0 must hold <SOS>.
            h, c: initial hidden/cell states, (num_layers, batch, hidden_size).
            teacher_forcing_ratio: per-step probability of feeding the ground
                truth token instead of the model's own prediction; use 0.0 at
                inference time.

        Returns:
            (batch, tgt_len, vocab) logits; the t=0 slice stays all zeros
            because decoding starts from the <SOS> token.
        """
        target_batch_size = target_seq.size(0)
        target_length = target_seq.size(1)
        # Bug fix: the vocabulary size used to come from the module-level
        # `input_size` global; read it from the decoder's output layer instead.
        target_vocab_size = self.decoder.fc.out_features

        out, hidden, cell = self.encoder(input_seq, h, c)

        # Allocate the logits buffer on the same device as the inputs.
        outputs = torch.zeros(target_batch_size, target_length, target_vocab_size,
                              device=input_seq.device)

        input = target_seq[:, 0].unsqueeze(1)  # start from <SOS>
        for t in range(1, target_length):
            output, hidden, cell = self.decoder(input, hidden, cell)

            predict = output.squeeze(1)  # (batch, vocab) logits for step t
            outputs[:, t] = predict

            # Coin flip per step: ground truth (teacher forcing) vs own prediction.
            teacher_force = torch.rand(1).item() < teacher_forcing_ratio
            top1 = predict.argmax(1)
            input = (target_seq[:, t] if teacher_force else top1).unsqueeze(1)

        return outputs

# Model assembly.
encoder = Encoder(input_size, hidden_size, emb_dim, num_layers)
decoder = Decoder(input_size, hidden_size, emb_dim, num_layers)
model = Seq2Seq(encoder, decoder)

# Restore the trained weights from the checkpoint file.
# NOTE(review): no map_location is given — if the checkpoint was saved on GPU
# this will fail on a CPU-only machine; consider map_location="cpu". Confirm.
model.load_state_dict(torch.load("pytorch-seq2seq-jieba.pth"))

def predictSay(text):
    """Generate a reply for `text` with the trained seq2seq model.

    Args:
        text: raw user input sentence.

    Returns:
        The decoded reply string with <SOS>/<PAD>/<EOS> markers stripped.
    """
    # Fresh zero states for a single-sentence batch.
    hidden = torch.zeros(num_layers, 1, hidden_size)
    cell = torch.zeros(num_layers, 1, hidden_size)

    # The dummy "" target only supplies the <SOS> start token and the length.
    input_tensor, output_tensor = make_data([text], [""])

    model.eval()  # inference mode (no dropout/batchnorm here, but good practice)
    with torch.no_grad():
        # Bug fix: the old call left teacher_forcing_ratio at its 0.5 default,
        # so at inference time roughly half the decoder steps were fed tokens
        # from the padded dummy target instead of the model's own predictions.
        output = model(input_tensor, output_tensor, hidden, cell,
                       teacher_forcing_ratio=0.0)
        # Greedy decode: most likely token at every step.
        predict = output.data.max(2, keepdim=True)[1]
        decoded = [word_vacab[i] for i in predict.view(-1)]
    return ''.join(decoded).replace("<SOS>", "").replace("<PAD>", "").replace("<EOS>", "")


# Smoke test: generate a reply for a sample prompt (runs on import).
print(predictSay("讲个故事"))
