import re
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
import torch.optim as optim
import time
import random
import matplotlib.pyplot as plt

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
SOS_token = 0
EOS_token = 1
MAX_LENGTH = 10
data_path = './data/eng-fra-v2.txt'


def normalizeString(s):
    """Lowercase, strip, separate sentence punctuation, and drop non-letters.

    Returns a string containing only [a-z .!?], with punctuation split off
    as its own token (e.g. "Hello!" -> "hello !").
    """
    s = s.lower().strip()
    # BUG FIX: the original replacement r"\1" was a no-op (punctuation
    # replaced by itself). The intent is to put a space before . ! ? so they
    # become separate tokens for the whitespace-based vocab split.
    s = re.sub(r"([.!?])", r" \1", s)
    # Collapse every run of characters outside the allowed set into one space.
    s = re.sub(r"[^a-zA-Z.!?]+", r" ", s)
    return s

def my_getdata():
    """Load the tab-separated eng-fra corpus and build both vocabularies.

    Returns:
        (english_word2index, english_index2word, english_word_n,
         french_word2index, french_index2word, french_word_n, my_pairs)
        where my_pairs is a list of [english_sentence, french_sentence]
        normalized string pairs, and indices 0/1 are reserved for SOS/EOS.
    """
    # FIX: close the file via a context manager and reuse the module-level
    # data_path constant instead of re-hardcoding the path.
    with open(data_path, encoding='utf-8') as f:
        my_lines = f.read().strip().split('\n')

    # FIX: the original built my_pairs with a loop and then immediately
    # overwrote it with this comprehension — the loop was dead code.
    my_pairs = [[normalizeString(s) for s in line.split('\t')] for line in my_lines]

    # Vocabularies start with the two reserved tokens.
    english_word2index = {"SOS": 0, "EOS": 1}
    english_word_n = 2
    french_word2index = {"SOS": 0, "EOS": 1}
    french_word_n = 2
    for pair in my_pairs:
        for word in pair[0].split():
            if word not in english_word2index:
                english_word2index[word] = english_word_n
                english_word_n += 1
        for word in pair[1].split():
            if word not in french_word2index:
                french_word2index[word] = french_word_n
                french_word_n += 1
    # Inverse mappings: index -> word.
    english_index2word = {v: k for k, v in english_word2index.items()}
    french_index2word = {v: k for k, v in french_word2index.items()}
    return english_word2index, english_index2word, english_word_n, \
        french_word2index, french_index2word, french_word_n, my_pairs

# Build the module-level globals once at import time: the English/French
# word<->index dictionaries, the vocabulary sizes, and the list of
# [english, french] sentence pairs used by the Dataset below.
english_word2index, english_index2word, english_word_n, \
    french_word2index, french_index2word, french_word_n, my_pairs = my_getdata()

class MyPairsDataset(Dataset):
    """Dataset of (English, French) sentence pairs, numericalized on access.

    Each item is a pair of 1-D long tensors of word indices, each ending
    with EOS_token, placed on the global `device`.
    """

    def __init__(self, my_pairs):
        # my_pairs: list of [english_sentence, french_sentence] string pairs.
        self.my_pairs = my_pairs
        self.sample_len = len(my_pairs)

    def __len__(self):
        # Number of sentence pairs.
        return self.sample_len

    def __getitem__(self, item):
        # Fetch the raw sentence pair at the given index.
        x = self.my_pairs[item][0]
        y = self.my_pairs[item][1]

        # Numericalize the source sentence: word -> index, then append EOS.
        # BUG FIX: use split() instead of split(' ') — normalizeString can
        # leave a leading/trailing space (e.g. for digit-only tokens), and
        # split(' ') would then yield an empty token and a KeyError here.
        x = [english_word2index[word] for word in x.split()]
        x.append(EOS_token)
        tensor_x = torch.tensor(x, dtype=torch.long, device=device)

        # Same numericalization for the target sentence.
        y = [french_word2index[word] for word in y.split()]
        y.append(EOS_token)
        tensor_y = torch.tensor(y, dtype=torch.long, device=device)

        return tensor_x, tensor_y

class EncoderRnn(nn.Module):
    """Single-layer GRU encoder: token ids -> per-step features + final hidden.

    input_size: source vocabulary size (e.g. 2803)
    hidden_size: embedding and GRU feature width (e.g. 256)
    """

    def __init__(self, input_size, hidden_size):
        super(EncoderRnn, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.embedding = nn.Embedding(num_embeddings=input_size, embedding_dim=hidden_size)
        self.gru = nn.GRU(input_size=hidden_size, hidden_size=hidden_size, batch_first=True)

    def forward(self, input, hidden):
        """Encode a whole sequence (or a single step).

        input: [1, n] token ids; hidden: [1, 1, hidden_size].
        Returns ([1, n, hidden_size] outputs, [1, 1, hidden_size] hidden).
        """
        output = self.embedding(input)          # [1, n] -> [1, n, hidden]
        output, hidden = self.gru(output, hidden)
        return output, hidden

    def init_hidden(self):
        # BUG FIX: allocate the initial hidden state on the same device as
        # the model parameters; the original always returned a CPU tensor,
        # which breaks once the model is moved to GPU.
        return torch.zeros(1, 1, self.hidden_size,
                           device=next(self.parameters()).device)

class DecoderRnn(nn.Module):
    """Plain (no-attention) GRU decoder: one target token in, log-probabilities
    over the target vocabulary out.

    outputsize: target vocabulary size (e.g. 4345)
    hidden_size: embedding and GRU feature width (e.g. 256)
    """

    def __init__(self, outputsize, hidden_size):
        super(DecoderRnn, self).__init__()
        self.outputsize = outputsize
        self.hidden_size = hidden_size

        # Target-language embedding layer.
        self.embedding = nn.Embedding(outputsize, hidden_size)
        self.gru = nn.GRU(hidden_size, hidden_size, batch_first=True)
        self.out = nn.Linear(hidden_size, outputsize)
        # LogSoftmax pairs with NLLLoss in the training loop.
        self.softmax = nn.LogSoftmax(dim=-1)

    def forward(self, input, hidden):
        """One decoding step.

        input: [1, 1] token id; hidden: [1, 1, hidden_size].
        Returns ([1, outputsize] log-probs, new hidden).
        """
        input = self.embedding(input)            # [1,1] -> [1,1,hidden]
        input = F.relu(input)
        output, hidden = self.gru(input, hidden)
        output = self.out(output[0])             # [1,1,hidden] -> [1,outputsize]
        output = self.softmax(output)
        return output, hidden

    def inithidden(self):
        # BUG FIX: the original referenced self.device, which is never set,
        # so any call raised AttributeError. Use the device the parameters
        # actually live on.
        return torch.zeros(1, 1, self.hidden_size,
                           device=next(self.parameters()).device)

class AttnDecoderRnn(nn.Module):
    """GRU decoder with attention over a fixed-length (max_length, hidden_size)
    tensor of encoder outputs.

    outputsize: target vocabulary size (e.g. 4345)
    hidden_size: embedding and GRU feature width (e.g. 256)
    dropout_p: dropout applied to the target-token embedding
    max_length: number of encoder positions attended over
    """

    def __init__(self, outputsize, hidden_size, dropout_p=0.1, max_length=MAX_LENGTH):
        super(AttnDecoderRnn, self).__init__()
        self.outputsize = outputsize
        self.hidden_size = hidden_size
        self.dropout_p = dropout_p
        self.max_length = max_length

        # Target-language embedding and recurrent core.
        self.embedding = nn.Embedding(outputsize, hidden_size)
        self.gru = nn.GRU(hidden_size, hidden_size, batch_first=True)
        self.out = nn.Linear(hidden_size, outputsize)
        # LogSoftmax pairs with NLLLoss in the training loop.
        self.softmax = nn.LogSoftmax(dim=-1)

        # Produces one attention score per encoder position from [Q; K].
        self.attn = nn.Linear(self.hidden_size + self.hidden_size, self.max_length)
        # Projects [Q; attention result] back down to hidden_size.
        # NOTE: the misspelled name "attn_combie" is kept on purpose so that
        # previously saved state_dicts remain loadable.
        self.attn_combie = nn.Linear(self.hidden_size + self.hidden_size, self.hidden_size)
        self.dropout = nn.Dropout(self.dropout_p)

    def attentionQKV(self, Q, K, V):
        """Attend over V using Q (embedded input) and K (decoder hidden).

        Q: [1, 1, hidden]; K: [1, 1, hidden]; V: [1, max_length, hidden].
        Returns ([1, 1, hidden] combined output, [1, max_length] weights).
        """
        # [1,1,H] + [1,1,H] -> [1,H] each -> concatenated [1,2H]
        tmp1 = torch.cat((Q[0], K[0]), dim=-1)
        # [1,2H] -> [1,max_length] raw scores
        tmp2 = self.attn(tmp1)
        # Normalize scores to a distribution over encoder positions.
        attn_weights = F.softmax(tmp2, dim=-1)
        # bmm: [1,1,max_length] @ [1,max_length,H] -> [1,1,H]
        attn_applied = torch.bmm(attn_weights.unsqueeze(0), V)
        # Concatenate query and attention result: [1,H]+[1,H] -> [1,2H]
        output = torch.cat((Q[0], attn_applied[0]), dim=-1)
        # Project back: [1,2H] -> [1,H] -> [1,1,H]
        output = self.attn_combie(output).unsqueeze(0)
        return output, attn_weights

    def forward(self, input, hidden, encoder_outputs):
        """One decoding step with attention.

        input: [1, 1] token id; hidden: [1, 1, hidden_size];
        encoder_outputs: [max_length, hidden_size] context tensor.
        Returns ([1, outputsize] log-probs, new hidden, [1, max_length] weights).
        """
        input = self.embedding(input)            # [1,1] -> [1,1,hidden]
        input = self.dropout(input)
        input, attn_weights = self.attentionQKV(input, hidden, encoder_outputs.unsqueeze(0))

        input = F.relu(input)
        output, hidden = self.gru(input, hidden)
        output = self.out(output[0])             # [1,1,hidden] -> [1,outputsize]
        output = self.softmax(output)

        return output, hidden, attn_weights

    def inithidden(self):
        # BUG FIX: the original referenced self.device, which is never set,
        # so any call raised AttributeError. Use the device the parameters
        # actually live on.
        return torch.zeros(1, 1, self.hidden_size,
                           device=next(self.parameters()).device)


# Training hyperparameters
epochs = 1                    # number of full passes over the dataset
mylr = 1e-4                   # Adam learning rate for both optimizers
teacher_forcing_ratio = 0.5   # threshold compared against random() in Train_Iters
print_interval_num = 100      # log the averaged loss every N iterations
plot_interval_num = 100       # record a loss point for plotting every N iterations

def Train_Iters(x,y,my_encoderrnn,my_attndecoderrnn,myadam_encode,myadam_decode,mycrossentropyloss):
    """Run one optimization step on a single (x, y) sentence pair.

    x, y: [1, seq_len] long tensors of source/target token ids.
    Returns the loss averaged over the target length (a float).
    """
    # 1. Encode the whole source sentence: [1,n],[1,1,H] -> [1,n,H],[1,1,H]
    encode_hidden = my_encoderrnn.init_hidden()
    encode_output, encode_hidden = my_encoderrnn(x, encode_hidden)

    # 2. Copy the encoder outputs into a fixed-size context tensor.
    # FIX: clip to MAX_LENGTH so a source sentence longer than MAX_LENGTH no
    # longer raises an out-of-range index error on encode_output_c.
    encode_output_c = torch.zeros(MAX_LENGTH, my_encoderrnn.hidden_size, device=device)
    for idx in range(min(encode_output.shape[1], MAX_LENGTH)):
        encode_output_c[idx] = encode_output[0, idx]

    # 3. Decoding starts from SOS with the encoder's final hidden state.
    decode_hidden = encode_hidden
    input_y = torch.tensor([[SOS_token]], device=device)
    myloss = 0.0
    y_len = y.shape[1]

    # 4. BUG FIX: the original computed use_teacher_forcing and then ignored
    # it, always feeding the ground-truth token. The flag now actually
    # selects between teacher forcing and feeding back the model's output.
    use_teacher_forcing = random.random() < teacher_forcing_ratio

    for idx in range(y_len):
        # [1,1],[1,1,H],[MAX_LENGTH,H] -> [1,V],[1,1,H],[1,MAX_LENGTH]
        output_y, decode_hidden, attn_weights = my_attndecoderrnn(input_y, decode_hidden, encode_output_c)
        target_y = y[0][idx].view(1)  # NLLLoss expects a 1-D target
        myloss = myloss + mycrossentropyloss(output_y, target_y)
        if use_teacher_forcing:
            # Next input is the ground-truth token: [] -> [1,1]
            input_y = y[0][idx].view(1, -1)
        else:
            # Next input is the model's most likely token; stop at EOS.
            topi = output_y.topk(1)[1]       # [1,1] index of the best word
            if topi.item() == EOS_token:
                break
            input_y = topi.detach()          # detach from the graph as input

    # 5. Backpropagate and step both optimizers.
    myadam_encode.zero_grad()
    myadam_decode.zero_grad()
    myloss.backward()
    myadam_encode.step()
    myadam_decode.step()

    return myloss.item() / y_len

def Train_seq2seq():
    """Train the attention seq2seq model for `epochs` epochs.

    Saves encoder/decoder checkpoints after every epoch and writes the
    averaged-loss curve to ./s2sq_loss.png.
    """
    # Data pipeline: one sentence pair per batch.
    mypairsdataset = MyPairsDataset(my_pairs)
    mydataloader = DataLoader(dataset=mypairsdataset, batch_size=1, shuffle=True)
    # Encoder / decoder.
    # FIX: derive vocabulary sizes from the loaded data instead of
    # hardcoding 2803/4345, and move both models to the global device so
    # they match the tensors produced by MyPairsDataset.
    my_encoderrrnn = EncoderRnn(english_word_n, 256).to(device)
    my_attndecoderrn = AttnDecoderRnn(french_word_n, 256).to(device)
    # One optimizer per sub-model.
    myadam_encode = optim.Adam(my_encoderrrnn.parameters(), lr=mylr)
    myadam_decode = optim.Adam(my_attndecoderrn.parameters(), lr=mylr)
    # NLLLoss pairs with the decoder's LogSoftmax output.
    mycrossentropyloss = nn.NLLLoss()
    # Collected per-interval average losses for the final plot.
    plot_loss_list = []

    for epoch_idx in range(1, 1 + epochs):
        print_loss_total, plot_loss_total = 0.0, 0.0
        starttime = time.time()
        for item, (x, y) in enumerate(mydataloader, start=1):
            myloss = Train_Iters(x, y, my_encoderrrnn, my_attndecoderrn,
                                 myadam_encode, myadam_decode, mycrossentropyloss)
            print_loss_total += myloss
            plot_loss_total += myloss

            # Periodic console logging of the averaged loss.
            if item % print_interval_num == 0:
                print_loss_avg = print_loss_total / print_interval_num
                print_loss_total = 0
                print('轮次%d 损失%.6f 时间：%d' % (epoch_idx, print_loss_avg, time.time() - starttime))

            # Periodic sampling of the loss for the plot.
            if item % plot_interval_num == 0:
                plot_loss_list.append(plot_loss_total / plot_interval_num)
                plot_loss_total = 0

        # Save a checkpoint after each epoch (file names kept unchanged).
        torch.save(my_encoderrrnn.state_dict(), './my_encoderrrnn_%d.pth' % epoch_idx)
        torch.save(my_attndecoderrn.state_dict(), './my_attndecoderrnn_%d.pth' % epoch_idx)

    # After all epochs: plot the loss curve.
    plt.figure()
    plt.plot(plot_loss_list)
    plt.savefig('./s2sq_loss.png')
    plt.show()

def dm01_test_MyPairsDataset():
    """Smoke test: print the first two numericalized (x, y) pairs."""
    dataset = MyPairsDataset(my_pairs)
    loader = DataLoader(dataset=dataset, batch_size=1, shuffle=True)
    for sample_idx, (x, y) in enumerate(loader):
        print('x-->', x.shape, x)
        print('y-->', y.shape, y)
        if sample_idx == 1:
            break
    print('文本数值化 ok')

def dm02_test_EncoderRnn():
    """Smoke test: encode one batch, both whole-sequence and token-by-token."""
    # Build the dataset and a batch-size-1 loader.
    dataset = MyPairsDataset(my_pairs)
    loader = DataLoader(dataset=dataset, batch_size=1, shuffle=True)
    # Instantiate the encoder and show its structure.
    encoder = EncoderRnn(2803, 256)
    print('myencoderrnn-->', encoder)
    # Feed the model one sample.
    for _, (x, y) in enumerate(loader):
        print('x-->', x)
        print('y-->', y)
        # Whole sequence in a single forward pass.
        h0 = encoder.init_hidden()
        encode_output_c, h0 = encoder(x, h0)
        print('encode_output_c-->', encode_output_c, encode_output_c.shape)

        # Same sequence again, one token at a time.
        h = encoder.init_hidden()
        for t in range(x.shape[1]):
            step_in = x[0][t].view(1, -1)
            encode_output, h = encoder(step_in, h)
        break

def dm03_test_DecoderRnn():
    """Smoke test: encode one batch, then decode it step by step (no attention)."""
    # Build the dataset and a batch-size-1 loader.
    dataset = MyPairsDataset(my_pairs)
    loader = DataLoader(dataset=dataset, batch_size=1, shuffle=True)
    encoder = EncoderRnn(2803, 256)
    decoder = DecoderRnn(4345, 256)
    print('mydataloader--> ', decoder)

    for x, y in loader:
        print('x-->', x)
        print('y-->', y)

        state = encoder.init_hidden()
        encoder_ouput_c, state = encoder(x, state)
        # Decode token by token, reusing the encoder's final hidden state.
        for step in range(y.shape[1]):
            step_in = y[0][step].view(1, -1)
            output_y, state = decoder(step_in, state)
            print('每个时间步的输出4345种可能->output_y-->', output_y.shape)
        break

def dm04_test_AttnDecoderRnn():
    """Smoke test: encode one batch, build the fixed-length context tensor,
    then decode it step by step with attention."""
    # Build the dataset and a batch-size-1 loader.
    mypairsdataset = MyPairsDataset(my_pairs)
    mydataloader = DataLoader(dataset=mypairsdataset, batch_size=1, shuffle=True)
    myencoderrnn = EncoderRnn(2803, 256)
    mydecoderrnn = AttnDecoderRnn(4345, 256, dropout_p=0.1, max_length=10)
    print('mydecoderrnn--> ', mydecoderrnn)

    for x, y in mydataloader:
        print('x-->', x)
        print('y-->', y)

        hidden = myencoderrnn.init_hidden()
        output, hidden = myencoderrnn(x, hidden)  # [1, seq_len, 256]

        # Fixed-size context tensor for the attention.
        # BUG FIX: start from zeros, not torch.randn — the original left the
        # rows beyond the sentence length filled with random noise that the
        # attention then mixed in; zeros matches what Train_Iters does.
        # Also clip the copy at 10 rows to avoid an index error on long inputs.
        encode_output_c = torch.zeros(10, 256)
        for idx in range(min(output.shape[1], 10)):
            encode_output_c[idx] = output[0, idx]

        # Decode step by step with attention over the context tensor.
        for i in range(y.shape[1]):
            tmp = y[0][i].view(1, -1)
            output_y, hidden, attn_weights = mydecoderrnn(tmp, hidden, encode_output_c)
            print('每个时间步的输出4345种可能->output_y-->', output_y.shape)
        break



if __name__ == '__main__':
    # Entry point. The dm0x helpers above are one-off smoke tests for the
    # dataset / encoder / decoders; by default this script runs training.
    # dm01_test_MyPairsDataset()
    # dm02_test_EncoderRnn()
    # dm03_test_DecoderRnn()
    Train_seq2seq()
    print("seq2seq 英译法")
