import torch
import torch.nn as nn
import re
import jieba
from torch.utils.data import DataLoader
import torch.nn.functional as F
import time
import torch.optim as optim
def build_vocab():
    """Build the lyrics vocabulary from data/jaychou_lyrics.txt.

    Cleans each line, segments it with jieba, and returns a 4-tuple:
        index_to_word: list mapping index -> word
        word_to_index: dict mapping word -> index
        corpus_index:  the whole corpus as a flat list of word indices,
                       with a space token appended after every line
        vocab size (len(index_to_word))
    """
    fname = 'data/jaychou_lyrics.txt'

    clean_sentences = []
    seen_lines = set()  # O(1) de-dup instead of O(n) list membership per line
    with open(fname, 'r', encoding='utf-8') as f:
        for line in f:
            line = line.replace('〖韩语Rap译文〗', '')
            # Keep Chinese characters, spaces, letters, digits and !?, only
            line = re.sub(r'[^\u4e00-\u9fa5 a-zA-Z0-9!?,]', '', line)
            # NOTE(review): the original comment said "collapse runs of spaces
            # to one", but the code deletes them entirely; kept byte-identical
            # so the vocabulary/indices stay compatible with saved models.
            line = re.sub(r'[ ]{2,}', '', line)
            # Strip surrounding whitespace/newline
            line = line.strip()
            # Drop empty and single-character lines
            if len(line) <= 1:
                continue
            if line not in seen_lines:
                seen_lines.add(line)
                clean_sentences.append(line)

    # Segment each line; assign indices to words in first-seen order.
    # A dict is used directly for uniqueness instead of scanning a list.
    all_sentences = []
    word_to_index = {}
    for line in clean_sentences:
        words = jieba.lcut(line)
        all_sentences.append(words)
        for word in words:
            if word not in word_to_index:
                word_to_index[word] = len(word_to_index)

    # dicts preserve insertion order, so this matches the index assignment
    index_to_word = list(word_to_index)

    # Encode the corpus: each line's word indices followed by a space token
    # as the line separator (assumes ' ' occurs as a jieba token — TODO confirm)
    corpus_index = []
    for sentence in all_sentences:
        corpus_index.extend(word_to_index[word] for word in sentence)
        corpus_index.append(word_to_index[' '])

    return index_to_word, word_to_index, corpus_index, len(index_to_word)

'''
Why LyricsDataset defines __len__ and __getitem__:
without these special (dunder) methods you could not call len(instance)
directly — you would need something like instance.get_length(); likewise,
indexing with instance[idx] would have to be instance.get_item(idx).
'''

class LyricsDataset:  # map-style dataset so it plugs straight into DataLoader
    """Expose the flat corpus as fixed-length next-word-prediction samples.

    Each item is an (x, y) pair of index tensors where y is x shifted one
    position to the right (the next-word targets).
    """

    def __init__(self, corpus_index, num_chars):
        # Flat list of word indices for the whole corpus
        self.corpus_index = corpus_index
        # Words per training sample
        self.num_chars = num_chars
        # Total number of words in the corpus
        self.word_count = len(self.corpus_index)
        # How many non-overlapping samples the corpus can yield
        self.number = self.word_count // self.num_chars

    def __len__(self):
        return self.number

    def __getitem__(self, idx):
        # Clamp the window start so both x and the shifted y fit in-bounds
        upper = self.word_count - self.num_chars - 2
        start = idx if idx > 0 else 0
        if start > upper:
            start = upper

        inputs = self.corpus_index[start:start + self.num_chars]
        targets = self.corpus_index[start + 1:start + self.num_chars + 1]

        return torch.tensor(inputs), torch.tensor(targets)



# def test():
#     index_to_word, word_to_index, corpus_index, word_len = build_vocab()
#     lrc_dataset = LyricsDataset(corpus_index, 5)
#     data_loader = DataLoader(lrc_dataset, batch_size=1,shuffle=False) # batch_size是一批
#     for x, y in data_loader:
#         print(x)
#         print(y)
#         break


class TextGenerator(nn.Module):
    """Word-level RNN language model: embedding -> RNN -> linear over vocab."""

    def __init__(self, vocab_size):
        super(TextGenerator, self).__init__()
        # Word embedding layer
        self.ebd = nn.Embedding(vocab_size, 128)
        # Single-layer recurrent network
        self.rnn = nn.RNN(128, 128, 1)
        # Project hidden states back onto the vocabulary
        self.out = nn.Linear(128, vocab_size)

    def forward(self, inputs, hidden):
        """Score the next word at each position of the input sequence.

        inputs: (batch=1, seq_len) tensor of word indices
        hidden: (1, 1, 128) hidden state, e.g. from init_hidden()
        Returns (logits, hidden): logits of shape (seq_len, vocab_size).
        """
        # (1, seq_len, 128)
        embed = self.ebd(inputs)

        # BUGFIX: F.dropout defaults to training=True, so dropout was also
        # applied at inference time; gate it on self.training instead.
        embed = F.dropout(embed, p=0.2, training=self.training)

        # RNN expects (seq_len, batch, features)
        output, hidden = self.rnn(embed.transpose(0, 1), hidden)

        # BUGFIX: the original assigned this dropout to `embed` and never
        # used the result; apply it to `output` as clearly intended.
        output = F.dropout(output, p=0.2, training=self.training)

        # (seq_len, 128) -> (seq_len, vocab_size)
        output = self.out(output.squeeze())

        return output, hidden

    def init_hidden(self):
        """Fresh all-zero hidden state for a batch of one."""
        return torch.zeros(1, 1, 128)



def train():
    """Train the lyrics language model and save its weights.

    Writes a per-epoch loss/time line to lyrics_training.log and stores the
    final weights as lyrics_model_<epoch>.bin.
    """
    # Vocabulary and encoded corpus
    index_to_word, word_to_index, corpus_index, word_count = build_vocab()
    # Dataset of 32-word windows
    lyrics = LyricsDataset(corpus_index, 32)
    # Model, loss and optimizer
    model = TextGenerator(word_count)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=1e-3)
    # Number of training epochs
    epoch = 200
    # One loader reused across epochs (shuffle=True reshuffles every epoch;
    # the original rebuilt it each epoch for no benefit)
    lyrics_dataloader = DataLoader(lyrics, shuffle=True, batch_size=1)

    # `with` guarantees the log file is closed even if training raises.
    with open('lyrics_training.log', 'w') as file:
        for epoch_idx in range(epoch):
            # Per-epoch bookkeeping
            start = time.time()
            iter_num = 0
            total_loss = 0.0

            for x, y in lyrics_dataloader:
                # Fresh hidden state for every (independent) sample
                hidden = model.init_hidden()
                output, hidden = model(x, hidden)
                # output: (32, vocab), y.squeeze(): (32,)
                loss = criterion(output, y.squeeze())
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

                iter_num += 1
                total_loss += loss.item()

            # max(..., 1) guards against division by zero on an empty loader
            message = 'epoch %3s loss: %.5f time %.2f' % \
                      (epoch_idx + 1,
                       total_loss / max(iter_num, 1),
                       time.time() - start)
            print(message)
            file.write(message + '\n')

    # Persist the trained weights
    torch.save(model.state_dict(), 'lyrics_model_%d.bin' % epoch)



def predict(start_word, sentence_length):
    """Greedily generate `sentence_length` words after `start_word` and print them.

    Loads the vocabulary and lyrics_model_200.bin from disk.
    Raises KeyError if start_word was never seen in the training corpus.
    """
    # Rebuild the vocabulary so indices match the saved model
    index_to_word, word_to_index, _, word_count = build_vocab()
    # Restore the trained model
    model = TextGenerator(vocab_size=word_count)
    model.load_state_dict(torch.load('lyrics_model_200.bin'))
    # BUGFIX: switch to eval mode so dropout is disabled during generation
    model.eval()

    # Initial hidden state and seed word
    hidden = model.init_hidden()
    word_idx = word_to_index[start_word]
    generate_sentence = [word_idx]

    # No gradients are needed for inference
    with torch.no_grad():
        for _ in range(sentence_length):
            output, hidden = model(torch.tensor([[word_idx]]), hidden)
            # Greedy decoding: always take the highest-scoring word.
            # .item() keeps plain ints in the list instead of 0-d tensors.
            word_idx = torch.argmax(output).item()
            generate_sentence.append(word_idx)

    for idx in generate_sentence:
        print(index_to_word[idx], end='')
    print()


if __name__ == '__main__':
    # Generate 50 words of lyrics seeded with '分手'
    # (requires data/jaychou_lyrics.txt and lyrics_model_200.bin from train())
    predict('分手', 50)

