from __future__ import unicode_literals, print_function, division
from io import open
import glob
import os
import unicodedata
import string

import torch
from torch import nn, optim

import random
import time
import math
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from tqdm import tqdm

"""

该模型为 RNN 的 N vs 1 ，输入一个seq(name)，输出1个类别结果

"""


def find_files(path):
    """Return the list of file paths matching the glob *path*."""
    return list(glob.iglob(path))


# print(find_files('data/names/*.txt'))


#  把字符转为正常的英文字符
def unicode_to_ascii(s):
    """Strip accents from *s*: NFD-decompose it, drop combining marks, and
    keep only characters present in the ``all_letters`` vocabulary."""
    kept = []
    for ch in unicodedata.normalize('NFD', s):
        if unicodedata.category(ch) == 'Mn':
            continue  # 'Mn' = nonspacing mark, i.e. a detached accent
        if ch in all_letters:
            kept.append(ch)
    return ''.join(kept)


# print(unicode_to_ascii('Ślusàrski'))

# 获取文件中的name
def get_names(file):
    names = []
    """
    这种方式读文件容易造成内存泄露， Memory Error
    """
    # with open(file, mode='r', encoding='utf8') as f:
    #     line = f.readline()
    #     while line:
    #         names.append(unicode_to_ascii(line.strip()))

    """
    更好的读文件方式
    """
    with open(file, mode='r', encoding='utf8') as f:
        for line in f:
            if line:
                names.append(unicode_to_ascii(line.strip()))
    return names


"""
转为 one-hot变量
"""
all_letters = string.ascii_letters + " .,;'"  # every character a name may contain; a character's position here is its one-hot index
n_letters = len(all_letters)  # length of each one-hot vector


def letter_to_index(letter):
    """Return the position of *letter* inside ``all_letters``.

    ``str.find`` yields -1 when the character is outside the vocabulary.
    """
    idx = all_letters.find(letter)
    return idx


# print(all_letters)

# 把字母转为one-hot
def letter_to_tensor(letter):
    """One-hot encode a single character as a (1, n_letters) tensor."""
    one_hot = torch.zeros(1, n_letters)
    one_hot[0, letter_to_index(letter)] = 1
    return one_hot


# 直接把一个name字符串转为one-hot
def line_to_tensor(name):
    tensor = torch.zeros(len(name), 1, n_letters)  # 三维tensor,格式要与input张量x对应
    for idx, word in enumerate(name):
        tensor[idx][0][letter_to_index(word)] = 1
    return tensor


"""
定义RNN
"""


class RNN(nn.Module):
    """Minimal recurrent cell for N-to-1 sequence classification.

    Args:
        input_size: length of each one-hot input vector.
        hidden_size: number of hidden units.
        output_size: number of target classes.
    """

    def __init__(self, input_size, hidden_size, output_size):
        super(RNN, self).__init__()
        self.hidden_size = hidden_size

        # Both projections consume the concatenation [input, hidden].
        self.i2h = nn.Linear(input_size + hidden_size, hidden_size)
        self.i2o = nn.Linear(input_size + hidden_size, output_size)
        self.softmax = nn.LogSoftmax(dim=1)  # log-probabilities over classes (dim 1)

    def forward(self, input, hidden):
        """One time step: return (log_probs, new_hidden)."""
        joined = torch.cat((input, hidden), 1)  # concatenate along the feature axis
        new_hidden = self.i2h(joined)  # next hidden state from [x, h_prev]
        scores = self.i2o(joined)
        return self.softmax(scores), new_hidden

    def initHidden(self):
        """Return a zero hidden state of shape (1, hidden_size)."""
        return torch.zeros(1, self.hidden_size)


"""
获取数据
"""


# 获取结果
def categoryFromOutput(output):
    top_n, top_i = output.topk(1)  # 返回k个最大值,跟他的索引index
    category_i = top_i[0].item()
    return all_categories[category_i], category_i


# 获取一些训练示例

def randomChoice(l):
    """Return a uniformly random element of the sequence *l*."""
    last = len(l) - 1
    return l[random.randint(0, last)]


def randomTrainingExample():
    """Draw a random (category, name) pair plus its tensor encodings.

    Returns (category, name, target_tensor, input_tensor) where the target is
    a LongTensor of shape (1,) and the input is (len(name), 1, n_letters).
    """
    category = randomChoice(all_categories)        # random class
    line = randomChoice(category_names[category])  # random name within it
    target = torch.tensor([all_categories.index(category)], dtype=torch.long)
    return category, line, target, line_to_tensor(line)


""""
train
"""


def train(category_tensor, line_tensor):
    """Run one training step on a single (name, category) example.

    Args:
        category_tensor: LongTensor of shape (1,) holding the target class index.
        line_tensor: one-hot input of shape (seq_len, 1, n_letters).

    Returns:
        (output, loss): final-step log-probabilities and the scalar loss value.

    Relies on module-level ``rnn``, ``criterion`` and ``learning_rate``.
    """
    hidden = rnn.initHidden()  # fresh zero hidden state for each example

    rnn.zero_grad()  # clear gradients left over from the previous step

    # Feed the sequence one time step at a time (seq_len == line_tensor.size(0));
    # only the output of the final step is scored (N-to-1 classification).
    # Note: ``output`` is deliberately a plain local now — the caller already
    # uses the returned value, so the old ``global output`` was needless
    # mutable module state.
    output = None
    for i in range(line_tensor.size()[0]):
        output, hidden = rnn(line_tensor[i], hidden)

    loss = criterion(output, category_tensor)
    loss.backward()

    # Hand-rolled SGD: p <- p - lr * grad. An optimizer such as
    # optim.SGD(rnn.parameters(), lr=learning_rate) would do the same.
    for p in rnn.parameters():
        p.data.add_(p.grad.data, alpha=-learning_rate)

    return output, loss.item()


def timeSince(since):
    """Format the time elapsed since *since* (epoch seconds) as 'Xm Ys'."""
    elapsed = time.time() - since
    minutes, seconds = divmod(elapsed, 60)
    return '%dm %ds' % (minutes, seconds)


"""
evaluate model
"""


# 把name的预测结果返回
def evaluate(line_tensor):
    global output
    hidden = rnn.initHidden()

    for i in range(line_tensor.size()[0]):
        output, hidden = rnn(line_tensor[i], hidden)

    return output


if __name__ == '__main__':
    category_names = {}
    all_categories = []
    print('读取文件')
    for file in find_files(r'D:\data\classification\data\names\*.txt'):
        # Category name = file base name without its extension (e.g. "Irish").
        category = os.path.splitext(os.path.basename(file))[0]
        names = get_names(file)
        all_categories.append(category)
        category_names[category] = names
    n_categories = len(all_categories)
    print('文件读取完成')

    # Number of hidden units (hyperparameter).
    n_hidden = 128
    rnn = RNN(n_letters, n_hidden, n_categories)
    # Smoke test: encode one name — shape (len(name), 1, n_letters), i.e. the
    # time-step count equals the word length and batch size is 1.
    input = line_to_tensor('Chong')
    # Initial hidden state, all zeros, shape (1, n_hidden).
    hidden = torch.zeros(1, n_hidden)
    # model(x) invokes forward(); here we feed only the first letter of the
    # sequence as a sanity check — training below iterates the whole sequence.
    output, next_hidden = rnn(input[0], hidden)
    # print(categoryFromOutput(output))

    # Show a few random training samples.
    for i in range(10):
        category, line, category_tensor, line_tensor = randomTrainingExample()
        print('category =', category, '/ line =', line)

    """
    Training
    """
    # The network already emits log-probabilities (nn.LogSoftmax), so the
    # matching criterion is NLLLoss. CrossEntropyLoss applies log-softmax
    # internally and would therefore apply it twice here.
    criterion = nn.NLLLoss()
    learning_rate = 0.005  # too high and the loss explodes, too low and nothing is learned

    n_iters = 100000
    print_every = 5000
    plot_every = 1000

    # Keep track of losses for plotting
    current_loss = 0
    all_losses = []

    start = time.time()

    for iter in tqdm(range(1, n_iters + 1)):  # one random example per iteration
        category, line, category_tensor, line_tensor = randomTrainingExample()
        output, loss = train(category_tensor, line_tensor)  # one SGD step; returns prediction and loss
        current_loss += loss  # accumulated only for the loss plot

        # Print iter number, loss, name and guess
        if iter % print_every == 0:
            guess, guess_i = categoryFromOutput(output)
            correct = '✓' if guess == category else '✗ (%s)' % category
            print('%d %d%% (%s) %.4f %s / %s %s' % (
                iter, iter / n_iters * 100, timeSince(start), loss, line, guess, correct))

        # Add current loss avg to list of losses
        if iter % plot_every == 0:
            all_losses.append(current_loss / plot_every)
            current_loss = 0

    plt.figure()
    plt.plot(all_losses)

    """
    Model evaluation: confusion matrix
    """

    # Rows index the true category, columns the predicted one.
    confusion = torch.zeros(n_categories, n_categories)
    n_confusion = 10000

    for i in range(n_confusion):
        category, line, category_tensor, line_tensor = randomTrainingExample()
        output = evaluate(line_tensor)
        guess, guess_i = categoryFromOutput(output)
        category_i = all_categories.index(category)
        confusion[category_i][guess_i] += 1

    # Normalize by dividing every row by its sum
    for i in range(n_categories):
        confusion[i] = confusion[i] / confusion[i].sum()

    # Set up plot
    fig = plt.figure()
    ax = fig.add_subplot(111)
    cax = ax.matshow(confusion.numpy())
    fig.colorbar(cax)

    # Set up axes
    ax.set_xticklabels([''] + all_categories, rotation=90)
    ax.set_yticklabels([''] + all_categories)

    # Force label at every tick
    ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
    ax.yaxis.set_major_locator(ticker.MultipleLocator(1))

    # sphinx_gallery_thumbnail_number = 2
    plt.show()
