import re

import torch

from torch.utils.data import Dataset, DataLoader
import torch.nn as nn
from tqdm import tqdm

device = torch.device("cuda" if torch.cuda.is_available() else 'cpu')  # prefer GPU when available
import torch.nn.functional as F

# print(device)

SOS_token = 0  # start-of-sequence index, reserved in both vocabularies
EOS_token = 1  # end-of-sequence index, reserved in both vocabularies
MAX_LENGTH = 10  # fixed attention window size (see DecoderAttention.max_len)
data_path = './data/eng-fra-v2.txt'  # tab-separated "english\tfrench" lines (split on '\t' in read_data)


# todo 1
def normalString(s):
    """Normalize a sentence for tokenization.

    Steps:
      1. lowercase and strip surrounding whitespace;
      2. insert a space before each of ``. ! ?`` so punctuation tokenizes
         as its own word;
      3. collapse every run of characters outside ``[a-zA-Z.!?]`` into a
         single space.
    """
    s = s.lower().strip()
    # Bug fix: the substitutions previously ran on the ORIGINAL string
    # (``re.sub(..., s)`` instead of ``s1``), silently discarding the
    # lower()/strip() normalization.
    s = re.sub(r'([.!?])', r' \1', s)
    s = re.sub(r'[^a-zA-Z.!?]+', ' ', s)
    return s


# todo 2
def read_data():
    """Load tab-separated sentence pairs and build both vocabularies.

    Reads ``data_path``, normalizes every field with ``normalString`` and
    assigns a contiguous index to each new word, starting after the two
    reserved SOS/EOS entries.

    Returns:
        (eng_word2index, eng_index2word, eng_vocab_size,
         fra_word2index, fra_index2word, fra_vocab_size, pairs)
    """
    with open(data_path, 'r', encoding='utf-8') as fh:
        raw_lines = fh.readlines()
    pairs = [[normalString(field) for field in raw.split('\t')] for raw in raw_lines]
    print(pairs[0])
    eng_w2i = {'SOS': SOS_token, 'EOS': EOS_token}
    fra_w2i = {'SOS': SOS_token, 'EOS': EOS_token}
    for pair in pairs:
        # setdefault assigns the next free index exactly once per new word,
        # matching the original running-counter bookkeeping.
        for token in pair[0].split(' '):
            eng_w2i.setdefault(token, len(eng_w2i))
        for token in pair[1].split(' '):
            fra_w2i.setdefault(token, len(fra_w2i))
    eng_i2w = dict(zip(eng_w2i.values(), eng_w2i.keys()))
    fra_i2w = dict(zip(fra_w2i.values(), fra_w2i.keys()))
    return (eng_w2i, eng_i2w, len(eng_w2i),
            fra_w2i, fra_i2w, len(fra_w2i), pairs)


# Build the vocabularies and sentence pairs once at import time; the
# classes and functions below read these module-level globals directly.
english_word2index, english_index2word, english_word_n, french_word2index, french_index2word, french_word_n, \
    my_pairs = read_data()


# todo3
class SeqDataset(Dataset):
    """Dataset of (english, french) sentence pairs as long-index tensors.

    Note: ``__getitem__`` reads the module-level ``english_word2index``,
    ``french_word2index`` and ``device`` globals built at import time.
    """

    def __init__(self, my_pairs):
        super().__init__()
        self.my_pairs = my_pairs

    def __len__(self):
        # Bug fix: report the length of THIS instance's pairs, not the
        # module-level global ``my_pairs``.
        return len(self.my_pairs)

    def __getitem__(self, index):
        # Bug fix: clamp out-of-range indices against the instance's own
        # pair list (previously clamped against the global list).
        index = min(max(0, index), len(self.my_pairs) - 1)
        x = self.my_pairs[index][0]
        y = self.my_pairs[index][1]
        list_x = [english_word2index[word] for word in x.split(' ')]
        list_y = [french_word2index[word] for word in y.split(' ')]
        # Only the target sequence is terminated with EOS.
        list_y.append(EOS_token)
        tensor_x = torch.tensor(list_x, dtype=torch.long, device=device)
        tensor_y = torch.tensor(list_y, dtype=torch.long, device=device)
        return tensor_x, tensor_y


# todo4
def get_dataloader():
    """Wrap the global pair list in a shuffled, batch-size-1 DataLoader."""
    return DataLoader(dataset=SeqDataset(my_pairs), batch_size=1, shuffle=True)


# todo5
class Encoder(nn.Module):
    """Single-layer GRU encoder over English token indices."""

    def __init__(self, eng_vocab_size, embed_dim):
        super().__init__()
        self.vocab_size = eng_vocab_size
        self.embed_dim = embed_dim
        # Hidden size equals the embedding size, so the embedding feeds
        # the GRU without any projection in between.
        self.embedding = nn.Embedding(eng_vocab_size, embed_dim)
        self.gru = nn.GRU(embed_dim, embed_dim, batch_first=True)

    def forward(self, input_x, hidden):
        """Embed a [1, seq_len] index tensor and run it through the GRU.

        Returns (output, hn): the per-step outputs and the final hidden state.
        """
        return self.gru(self.embedding(input_x), hidden)

    def inithidden(self):
        """Fresh all-zero hidden state of shape [1, 1, embed_dim]."""
        return torch.zeros(1, 1, self.embed_dim, device=device)


def encoder_test():
    """Smoke test: build an Encoder over the English vocab and print it."""
    encoder = Encoder(english_word_n, 256)
    print(encoder)


# todo 6
class Decoder(nn.Module):
    """Plain GRU decoder emitting log-probabilities over the French vocab."""

    def __init__(self, fren_vocab_size, embed_dim):
        super().__init__()
        self.fren_vocab_size = fren_vocab_size
        self.embed_dim = embed_dim
        self.embeded = nn.Embedding(fren_vocab_size, embed_dim)
        self.gru = nn.GRU(embed_dim, embed_dim, batch_first=True)
        self.out = nn.Linear(embed_dim, fren_vocab_size)
        self.softmax = nn.LogSoftmax(dim=-1)

    def forward(self, input_y, hidden):
        """One decoding step for a [1, 1] token index.

        Returns ([1, vocab] log-probs, new hidden state).
        """
        step = F.relu(self.embeded(input_y))
        step, hn = self.gru(step, hidden)
        # Drop the batch dim ([1, 1, dim] -> [1, dim]) before projecting.
        return self.softmax(self.out(step[0])), hn

    def init_hidden(self):
        """Fresh all-zero hidden state of shape [1, 1, embed_dim]."""
        return torch.zeros(1, 1, self.embed_dim, device=device)


def test_decoder():
    """Run one batch through Encoder + Decoder and print tensor shapes."""
    embed_dim = 256
    encoder = Encoder(english_word_n, embed_dim).to(device)
    decoder = Decoder(french_word_n, embed_dim).to(device)
    loader = get_dataloader()
    for x, y in loader:
        print('x.shape', x.shape)  # e.g. [1, 7]
        print('y.shape', y.shape)  # e.g. [1, 11]
        output, hn = encoder(x, hidden=encoder.inithidden())
        print(output.shape)  # [1, seq_len, 256]
        print(hn.shape)      # [1, 1, 256]
        # Feed the gold target tokens through the decoder one step at a time.
        for step in range(y.shape[1]):
            token = y[0, step].view(1, -1)
            output, hn = decoder(token, hn)
            print('output', output.shape)  # [1, french_word_n]
            print('hn', hn.shape)          # [1, 1, 256]
        break

# todo 7: decoder with attention
class DecoderAttention(nn.Module):
    """One-step GRU decoder with attention over padded encoder outputs.

    forward(q, k, v):
      q -- [1, 1] current target token index
      k -- [1, 1, embed_dim] previous hidden state (also used as the GRU h0)
      v -- [1, max_len, embed_dim] zero-padded encoder outputs

    Returns ([1, vocab] log-probs, new hidden state, raw attention scores).
    """

    def __init__(self, fren_vocabsize, embed_dim, max_len):
        super().__init__()
        self.fren_vocabsize = fren_vocabsize
        self.embed_dim = embed_dim
        self.max_len = max_len
        self.embedding = nn.Embedding(fren_vocabsize, embed_dim)
        # Scores one weight per encoder position from [embedded ; hidden].
        self.attn = nn.Linear(embed_dim + embed_dim, max_len)
        self.attn_combine = nn.Linear(2 * embed_dim, embed_dim)
        self.gru = nn.GRU(embed_dim, embed_dim, batch_first=True)
        self.out = nn.Linear(embed_dim, fren_vocabsize)
        self.softmax = nn.LogSoftmax(dim=-1)

    def forward(self, q, k, v):
        embeded_x = self.embedding(q)
        # Bug fix: F.dropout defaults to training=True, so dropout used to
        # fire even in eval mode; tie it to the module's training flag.
        dropout_x = F.dropout(embeded_x, p=0.1, training=self.training)
        attn_weight = self.attn(torch.cat([dropout_x, k], dim=-1))
        # Softmax the scores, then take a weighted sum of encoder outputs.
        attn_applied = torch.bmm(F.softmax(attn_weight, dim=-1), v)
        attention = self.attn_combine(torch.cat([attn_applied, dropout_x], dim=-1))
        attention = F.relu(attention)
        output, hn = self.gru(attention, k)
        result = self.out(output[0])
        return self.softmax(result), hn, attn_weight


def test_attndecoder():
    """Drive Encoder + DecoderAttention over the whole dataset (shape smoke test)."""
    embed_dim = 256
    encoder = Encoder(english_word_n, embed_dim).to(device)
    attn_decoder = DecoderAttention(french_word_n, embed_dim, max_len=MAX_LENGTH).to(device)
    seq_dataloader = get_dataloader()
    for x, y in tqdm(seq_dataloader):
        encoder_output, encoder_hidden = encoder(x, encoder.inithidden())
        # Zero-pad the encoder outputs into a fixed [MAX_LENGTH, dim] window.
        encoder_output_c = torch.zeros(MAX_LENGTH, embed_dim, device=device)
        for pos in range(x.shape[1]):  # for each source token
            encoder_output_c[pos] = encoder_output[0][pos]
        encoder_output_c = encoder_output_c.unsqueeze(dim=0)
        input_y = torch.tensor([[SOS_token]], device=device)
        hn = encoder_hidden
        # NOTE: input_y is never advanced — each step re-feeds SOS. This is
        # a pure shape/smoke test; behavior preserved from the original.
        for _ in range(y.shape[1]):
            result, hn, attn_weights = attn_decoder(input_y, hn, encoder_output_c)

# todo8 -- training hyper-parameters
my_lr = 1e-4  # learning rate
epochs = 2  # number of passes over the pair list
teacher_forcing_ratio = 0.5  # NOTE(review): not yet used in the visible code

def model2train():
    """Training entry point (incomplete): currently only builds the DataLoader."""
    loader = DataLoader(dataset=SeqDataset(my_pairs), batch_size=1, shuffle=True)


# NOTE(review): this print runs at import time, not only when run as a script.
print(len(my_pairs))
if __name__ == '__main__':
    # read_data()
    # encoder_test()
    # test_decoder()
    test_attndecoder()