
from io import open
import unicodedata
import string
import re
import random
import glob


import time
import math


import torch
import torch.nn as nn
from torch import optim
import torch.nn.functional as F




import matplotlib.ticker as ticker
import numpy as np

import matplotlib.pyplot as plt
plt.switch_backend('agg')
# BUG FIX: '%matplotlib inline' is an IPython/Jupyter magic, not Python
# syntax — in a plain .py file it raises a SyntaxError.  Kept here as a
# comment for anyone pasting this back into a notebook:
# %matplotlib inline

# Run on the GPU when one is available, otherwise fall back to the CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Reserved vocabulary indices: start-of-sequence and end-of-sequence markers.
SOS_token = 0
EOS_token = 1



# Turn a Unicode string to plain ASCII, thanks to
# https://stackoverflow.com/a/518232/2809427
def unicodeToAscii(s):
    """Strip accents from *s*: NFD-decompose, then drop combining marks ('Mn')."""
    decomposed = unicodedata.normalize('NFD', s)
    kept = [ch for ch in decomposed if unicodedata.category(ch) != 'Mn']
    return ''.join(kept)


def normalizeString(s):
    """Lowercase and de-accent *s*, put a space before each . ! ?, and
    collapse every other run of non-letters into a single space."""
    cleaned = unicodeToAscii(s.lower().strip())
    cleaned = re.sub(r"([.!?])", r" \1", cleaned)
    cleaned = re.sub(r"[^a-zA-Z.!?]+", r" ", cleaned)
    return cleaned

# res = normalizeString("!!!sss")
# print(res)








# Filter out pairs that are too long to be worth training on.
MAX_LENGTH = 100

# The classic seq2seq tutorial restricted targets to English subject
# prefixes; that filter is now an optional argument (see filterPair):
# eng_prefixes = ("i am ", "i m ", "he is", "he s ", "she is", "she s ",
#                 "you are", "you re ", "we are", "we re ",
#                 "they are", "they re ")


def filterPair(p, prefixes=None):
    """Return True when both sentences of pair *p* have < MAX_LENGTH words
    and, if *prefixes* is given, p[1] starts with one of them.

    BUG FIX: the original read a module-level ``eng_prefixes`` that is
    commented out above, so every call raised NameError.  The prefix filter
    is now an optional parameter; None (the default) disables it.
    """
    if len(p[0].split(' ')) >= MAX_LENGTH:
        return False
    if len(p[1].split(' ')) >= MAX_LENGTH:
        return False
    return prefixes is None or p[1].startswith(tuple(prefixes))


def filterPairs(pairs, prefixes=None):
    """Keep only the pairs that pass filterPair."""
    return [pair for pair in pairs if filterPair(pair, prefixes)]





#############################
####   自己的数据导入技术  #####
#############################

# Reserved token indices (duplicates of the definitions near the top of the file).
SOS_token = 0
EOS_token = 1

# Character inventory for the code vocabulary: digits, letters, punctuation
# and whitespace (string.printable, 100 characters).
all_chars = string.printable





class Code:
    def __init__(self, name):
        self.name = name

        self.char2index = {"SOS":0,"EOS":1}
        for idx, char in enumerate(all_chars):
            self.char2index[char] = idx+2

       
        self.index2char = {0: "SOS", 1: "EOS"}
        for k,v in self.char2index.items():
            self.index2char[v] = k


        self.n_chars = 2 + len(all_chars)  # Count SOS and EOS

        self.char2count = {}



    # def addSentence(self, sentence):
    #     for word in sentence.split(' '):
    #         self.addWord(word)

    # def addWord(self, word):
    #     if word not in self.word2index:
    #         self.word2index[word] = self.n_words
    #         self.word2count[word] = 1
    #         self.index2word[self.n_words] = word
    #         self.n_words += 1
    #     else:
    #         self.word2count[word] += 1



my_code = Code("pre")
# print(my_code.char2index)
# print(my_code.index2char)



def loadData(reverse=False, pattern='generated_data/*.txt', max_length=None):
    """Read (input, target) line pairs from every file matching *pattern*.

    Files are expected to hold pairs on consecutive lines: odd lines are
    inputs, even lines are targets.  Pairs where either side has
    ``max_length`` or more characters are dropped (defaults to the
    module-level MAX_LENGTH).

    BUG FIX: the original returned from inside the ``for`` loop, so only
    the first matching file was ever read — and None was returned when no
    file matched.  Pairs are now accumulated across all files and an empty
    list is returned when nothing matches.  The glob pattern and length
    limit are parameters now (backward-compatible defaults).

    ``reverse`` is accepted for interface compatibility but unused, as in
    the original.
    """
    limit = MAX_LENGTH if max_length is None else max_length
    all_pairs = []
    for file_name in glob.glob(pattern):
        with open(file_name) as f:
            while True:
                line1 = f.readline()
                if not line1:  # end of file
                    break
                line1 = line1.strip()
                line2 = f.readline().strip()
                if len(line1) < limit and len(line2) < limit:
                    all_pairs.append([line1, line2])
    return all_pairs

# Load every (input, target) pair once at import time and sanity-print one.
code_pairs = loadData()
print(random.choice(code_pairs))



class EncoderRNN(nn.Module):
    """Single-layer GRU encoder processing one token index per call.

    The caller drives the loop over the sequence, threading the returned
    hidden state back into the next call.
    """

    def __init__(self, input_size, hidden_size):
        super(EncoderRNN, self).__init__()
        self.hidden_size = hidden_size
        # Submodules are created in the original order so parameter
        # initialization consumes the RNG identically.
        self.embedding = nn.Embedding(input_size, hidden_size)
        self.gru = nn.GRU(hidden_size, hidden_size)

    def forward(self, input, hidden):
        # Embed the token and reshape to (seq_len=1, batch=1, hidden_size).
        step = self.embedding(input).view(1, 1, -1)
        output, hidden = self.gru(step, hidden)
        return output, hidden

    def initHidden(self):
        # Fresh all-zero state: (num_layers=1, batch=1, hidden_size).
        return torch.zeros(1, 1, self.hidden_size, device=device)





class DecoderRNN(nn.Module):
    """Plain (attention-free) GRU decoder emitting log-probabilities
    over the output vocabulary, one token per call."""

    def __init__(self, hidden_size, output_size):
        super(DecoderRNN, self).__init__()
        self.hidden_size = hidden_size
        # Same construction order as the original for identical RNG draws.
        self.embedding = nn.Embedding(output_size, hidden_size)
        self.gru = nn.GRU(hidden_size, hidden_size)
        self.out = nn.Linear(hidden_size, output_size)
        self.softmax = nn.LogSoftmax(dim=1)

    def forward(self, input, hidden):
        # Embed, reshape to (1, 1, hidden_size) and gate through ReLU.
        step = F.relu(self.embedding(input).view(1, 1, -1))
        output, hidden = self.gru(step, hidden)
        # Log-probabilities over the vocabulary for this step.
        scores = self.softmax(self.out(output[0]))
        return scores, hidden

    def initHidden(self):
        # Fresh all-zero state: (num_layers=1, batch=1, hidden_size).
        return torch.zeros(1, 1, self.hidden_size, device=device)




class AttnDecoderRNN(nn.Module):
    """GRU decoder with attention over a fixed window of encoder outputs.

    Attention weights are computed from the embedded input and the hidden
    state, span ``max_length`` encoder positions, and are returned so the
    caller can visualize them.
    """

    def __init__(self, hidden_size, output_size, dropout_p=0.1, max_length=MAX_LENGTH):
        super(AttnDecoderRNN, self).__init__()
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.dropout_p = dropout_p
        self.max_length = max_length

        self.embedding = nn.Embedding(self.output_size, self.hidden_size)
        # Scores one weight per encoder slot from [embedded; hidden].
        self.attn = nn.Linear(self.hidden_size * 2, self.max_length)
        # Mixes the embedded input with the attention context vector.
        self.attn_combine = nn.Linear(self.hidden_size * 2, self.hidden_size)
        self.dropout = nn.Dropout(self.dropout_p)
        self.gru = nn.GRU(self.hidden_size, self.hidden_size)
        self.out = nn.Linear(self.hidden_size, self.output_size)

    def forward(self, input, hidden, encoder_outputs):
        # input is a single token index; embedded is (1, 1, hidden_size).
        embedded = self.embedding(input).view(1, 1, -1)
        embedded = self.dropout(embedded)

        # Attention distribution over the max_length encoder slots.
        attn_weights = F.softmax(
            self.attn(torch.cat((embedded[0], hidden[0]), 1)), dim=1)
        # Weighted sum of encoder outputs -> context vector.
        attn_applied = torch.bmm(attn_weights.unsqueeze(0),
                                 encoder_outputs.unsqueeze(0))

        # Combine the embedded input with the context before the GRU step.
        output = torch.cat((embedded[0], attn_applied[0]), 1)
        output = self.attn_combine(output).unsqueeze(0)

        output = F.relu(output)
        output, hidden = self.gru(output, hidden)

        # Log-probabilities over the output vocabulary.
        output = F.log_softmax(self.out(output[0]), dim=1)
        return output, hidden, attn_weights

    def initHidden(self):
        # Fresh all-zero state: (num_layers=1, batch=1, hidden_size).
        return torch.zeros(1, 1, self.hidden_size, device=device)






###############################
####  我自己的代码变成向量  ######
###############################
def code2indexes(code):
    """Map each character of *code* to its vocabulary index (no EOS appended)."""
    indexes = []
    for char in code:
        indexes.append(my_code.char2index[char])
    return indexes



def code2tensor(code):
    """Encode *code* as a (len+1, 1) long tensor of indices, EOS-terminated."""
    indexes = code2indexes(code) + [EOS_token]
    return torch.tensor(indexes, dtype=torch.long, device=device).view(-1, 1)


def codePair2Tensors(pair):
    """Turn a (source, target) string pair into a pair of index tensors."""
    source, target = pair[0], pair[1]
    return (code2tensor(source), code2tensor(target))



# thing1 = random.choice(code_pairs)
# thing2 = codePair2Tensors(thing1)
# print(thing2)

#############################################################



# 助手函数
def asMinutes(s):
    """Format a duration of *s* seconds as 'Xm Ys'."""
    minutes, seconds = divmod(s, 60)
    return '%dm %ds' % (minutes, seconds)


def timeSince(since, percent):
    """Return 'elapsed (- remaining)' given a start time and the fraction of
    work completed; remaining is linearly extrapolated (total = elapsed/percent)."""
    elapsed = time.time() - since
    remaining = elapsed / percent - elapsed
    return '%s (- %s)' % (asMinutes(elapsed), asMinutes(remaining))





# 画图-关于损失
def showPlot(points):
    """Plot the loss curve with y-axis ticks placed every 0.2."""
    plt.figure()
    fig, axis = plt.subplots()
    # Regularly spaced ticks make loss curves easier to compare.
    axis.yaxis.set_major_locator(ticker.MultipleLocator(base=0.2))
    plt.plot(points)



# One training step over a single (input_tensor, target_tensor) pair.
teacher_forcing_ratio = 0.5
def train(input_tensor, target_tensor, encoder, decoder, encoder_optimizer, decoder_optimizer, criterion, max_length=MAX_LENGTH):
    """Run one encode/decode pass, backprop, and step both optimizers.

    input_tensor / target_tensor are (seq_len, 1) long tensors of indices.
    With probability ``teacher_forcing_ratio`` the ground-truth token is fed
    to the decoder at each step; otherwise its own greedy prediction is.
    Returns the loss averaged over target tokens.
    """
    encoder_hidden = encoder.initHidden()

    encoder_optimizer.zero_grad()
    decoder_optimizer.zero_grad()

    input_length = input_tensor.size(0)
    target_length = target_tensor.size(0)

    # One hidden_size vector per encoder position — the attention window.
    encoder_outputs = torch.zeros(max_length, encoder.hidden_size, device=device)

    loss = 0

    # Encode the input one token at a time, recording each step's output.
    for ei in range(input_length):
        encoder_output, encoder_hidden = encoder(
            input_tensor[ei], encoder_hidden)


        # print(f'===encoder output:{encoder_output.shape}')
        # print(f"===encoder hidden:{encoder_hidden.shape}")

        encoder_outputs[ei] = encoder_output[0, 0]

    # Decoding starts from SOS and the final encoder hidden state.
    decoder_input = torch.tensor([[SOS_token]], device=device)

    decoder_hidden = encoder_hidden

    use_teacher_forcing = True if random.random() < teacher_forcing_ratio else False

    if use_teacher_forcing:
        # Teacher forcing: Feed the target as the next input
        for di in range(target_length):
            decoder_output, decoder_hidden, decoder_attention = decoder(
                decoder_input, decoder_hidden, encoder_outputs)
            loss += criterion(decoder_output, target_tensor[di])
            decoder_input = target_tensor[di]  # Teacher forcing

    else:
        # Without teacher forcing: use its own predictions as the next input
        for di in range(target_length):
            decoder_output, decoder_hidden, decoder_attention = decoder(
                decoder_input, decoder_hidden, encoder_outputs)
            topv, topi = decoder_output.topk(1)
            decoder_input = topi.squeeze().detach()  # detach from history as input

            loss += criterion(decoder_output, target_tensor[di])
            if decoder_input.item() == EOS_token:
                break

    loss.backward()

    encoder_optimizer.step()
    decoder_optimizer.step()

    return loss.item() / target_length




# Training driver: samples tensor pairs from the data and feeds them to train().
def trainIters(encoder, decoder, n_iters, print_every=1000, plot_every=100, learning_rate=0.01):
    """Run ``n_iters`` single-pair training steps with Adam optimizers.

    Prints a timed progress line every ``print_every`` iterations and
    records an averaged loss point every ``plot_every`` iterations, then
    renders the loss curve with showPlot().
    """
    start = time.time()

    plot_losses = []

    print_loss_total = 0  # Reset every print_every
    plot_loss_total = 0   # Reset every plot_every

    encoder_optimizer = optim.Adam(encoder.parameters(), lr=learning_rate)
    decoder_optimizer = optim.Adam(decoder.parameters(), lr=learning_rate)

    # print(f'===n_iters:{n_iters}')


    # Sample the training data: one random tensor pair per iteration.
    training_pairs = [codePair2Tensors(random.choice(code_pairs)) # adapted: pairs -> code_pairs; tensorsFromPair -> codePair2Tensors
                      for i in range(n_iters)]
    # print(f'===trainning pairs:{len(training_pairs)}')
    # print(f'===trainning pairs:{training_pairs}')
    criterion = nn.NLLLoss()

    for iter in range(1, n_iters + 1):
        training_pair = training_pairs[iter - 1]
        input_tensor = training_pair[0]    # source tensor
        target_tensor = training_pair[1]   # target tensor
        # print(f'input_tensor:{input_tensor}')

        loss = train(input_tensor, target_tensor, encoder,
                     decoder, encoder_optimizer, decoder_optimizer, criterion)

        print_loss_total += loss
        plot_loss_total += loss

        if iter % print_every == 0:
            print_loss_avg = print_loss_total / print_every
            print_loss_total = 0
            print('%s <%d %d%%> %.4f' % (timeSince(start, iter / n_iters),
                                         iter, iter / n_iters * 100, print_loss_avg))

        if iter % plot_every == 0:
            plot_loss_avg = plot_loss_total / plot_every
            plot_losses.append(plot_loss_avg) # collect averaged losses for the final plot
            plot_loss_total = 0

    showPlot(plot_losses)





# Evaluation: greedy decoding with gradients disabled.
def evaluate(encoder, decoder, sentence, max_length=MAX_LENGTH):
    """Greedily decode *sentence* and return (decoded_chars, attentions).

    decoded_chars is the list of predicted characters (ending with '<EOS>'
    when the model emits it); attentions holds one attention row per
    decoding step, shape (steps, max_length).
    """
    with torch.no_grad():
        input_tensor = code2tensor(sentence)  # adapted to the character vocabulary
        input_length = input_tensor.size()[0]
        encoder_hidden = encoder.initHidden()

        encoder_outputs = torch.zeros(max_length, encoder.hidden_size, device=device)

        # Encode token by token, storing each step's output for attention.
        for ei in range(input_length):
            encoder_output, encoder_hidden = encoder(input_tensor[ei],
                                                     encoder_hidden)
            encoder_outputs[ei] += encoder_output[0, 0]

        decoder_input = torch.tensor([[SOS_token]], device=device)  # SOS

        decoder_hidden = encoder_hidden

        decoded_words = []
        decoder_attentions = torch.zeros(max_length, max_length)

        for di in range(max_length):
            decoder_output, decoder_hidden, decoder_attention = decoder(
                decoder_input, decoder_hidden, encoder_outputs) 

            # print(f"=== decoder attention:{decoder_attention.shape}")
            # print(f"=== decoder attention data:{decoder_attention.data}")
            # print(f'=== attentions:{decoder_attentions[di].shape}')

            # Keep this step's attention row for visualization.
            decoder_attentions[di] = decoder_attention.data


            # Greedy choice: take the single highest-scoring character.
            topv, topi = decoder_output.data.topk(1)

            # print(f'decoder output:{decoder_output.shape}')

            if topi.item() == EOS_token:
                decoded_words.append('<EOS>')
                break

            else:
                decoded_words.append(my_code.index2char[topi.item()])

            decoder_input = topi.squeeze().detach()

        return decoded_words, decoder_attentions[:di + 1]


# Spot-check the model on a few random pairs.
def evaluateRandomly(encoder, decoder, n=10):
    """Sample *n* random pairs and print source (>), reference (=) and
    the model's prediction (<) for each."""
    for _ in range(n):
        src, ref = random.choice(code_pairs)
        print('>', src)
        print('=', ref)
        predicted_chars, _attentions = evaluate(encoder, decoder, src)
        print('<', ''.join(predicted_chars))
        print('')



# Training entry point: build the encoder/decoder and run the training loop.

hidden_size = 256
encoder1 = EncoderRNN(my_code.n_chars, hidden_size).to(device) # vocab size taken from the character vocabulary
attn_decoder1 = AttnDecoderRNN(hidden_size, my_code.n_chars, dropout_p=0.1).to(device)


N_ITERS = 20000 #75000
PRINT_EVERY = 100 #5000
PLOT_EVERY = 100
trainIters(encoder1, attn_decoder1, N_ITERS, print_every=PRINT_EVERY, plot_every=PLOT_EVERY, learning_rate=0.0001)




