import torch
from shell.knowledge_graph.text_classification.data_process import *
import pandas as pd
import numpy as np
from torch.utils.data import DataLoader, TensorDataset
from torch import nn
from utility.import_log import flog
log = flog(__file__)

# 加载训练数据与验证数据
def load_data(batch_size=32, data_path='data.csv'):
    """Load the dataset, split into train/eval, and build a training DataLoader.

    Args:
        batch_size: mini-batch size for the training DataLoader.
        data_path: CSV file with 'question' and 'label' columns
            (defaults to the original hard-coded 'data.csv').

    Returns:
        (train_data_loader, eval_x, eval_y) where eval_x is an int64 numpy
        array of padded token-index sequences and eval_y a numpy label array.
    """
    log.info('开始加载训练数据文件')
    df = pd.read_csv(data_path)
    log.info('数据文件加载成功,开始进行训练验证数据划分')
    train_df, eval_df = split_data(df)

    # Convert questions to padded index sequences. Force int64 so that
    # torch.from_numpy yields LongTensors, which nn.Embedding requires
    # (padding can otherwise upcast rows to float64).
    train_x = padding_seq(train_df['question'].apply(seq2index)).astype(np.int64)
    train_y = np.asarray(train_df['label'], dtype=np.int64)

    train_data_set = TensorDataset(torch.from_numpy(train_x), torch.from_numpy(train_y))
    train_data_loader = DataLoader(dataset=train_data_set, batch_size=batch_size, shuffle=True)

    eval_x = padding_seq(eval_df['question'].apply(seq2index)).astype(np.int64)
    log.info('数据加载,padding等预处理完毕')
    return train_data_loader, eval_x, eval_df['label'].values


# 把数据转换成index
def seq2index(seq):
    """Tokenize *seq* and map every token to its vocabulary index.

    Fix: the original re-read and re-parsed 'vocab.txt' on every single
    call, but this function is applied once per dataset row via
    Series.apply — that is O(rows x vocab) file I/O. The dictionary is
    now loaded once and cached on the function object.

    Tokens missing from the vocabulary fall back to the '[UNK]' index.

    Returns:
        list[int] of token indices.
    """
    if not hasattr(seq2index, '_dictionary'):
        log.info('开始加载字典文件,进行数据转index操作')
        vocab = pd.read_csv('vocab.txt', header=None)
        # Row position in vocab.txt is the token's index.
        seq2index._dictionary = dict(zip(vocab[0].values, vocab.index.values))
    dictionary = seq2index._dictionary
    unk = dictionary['[UNK]']
    return [dictionary.get(token, unk) for token in tokenize(seq)]


# 统一长度
def padding_seq(X, max_len=15):
    """Pad or truncate every index sequence in X to exactly max_len.

    Sequences shorter than max_len are right-padded with 0; longer ones
    are truncated.

    Fix: the original used np.concatenate([x, [0] * k]); for an EMPTY
    sequence the empty input defaults to float64, upcasting that row —
    and then the whole stacked batch — to float, which breaks the
    torch.from_numpy -> nn.Embedding pipeline (embeddings need Long
    indices). The dtype is now pinned to int64.

    Args:
        X: iterable of index sequences (lists or 1-D arrays).
        max_len: target row length.

    Returns:
        np.ndarray of shape (len(X), max_len), dtype int64.
    """
    rows = [list(x[:max_len]) + [0] * max(0, max_len - len(x)) for x in X]
    return np.array(rows, dtype=np.int64)


def one_hot(y, nb_classes=5):
    """One-hot encode the label vector *y*.

    If nb_classes is falsy (0 / None), it is inferred as max(y) + 1.

    Returns:
        np.ndarray of shape (len(y), nb_classes), dtype float64.
    """
    labels = np.asarray(y, dtype='int16')
    if not nb_classes:
        nb_classes = np.max(labels) + 1
    # Row i of the identity matrix is the one-hot vector for class i.
    return np.eye(nb_classes)[labels]


# 3、构建模型、训练、打印评估指标
class TextClassification(nn.Module):
    """Embedding -> 3-layer LSTM -> two fully-connected layers -> 5-class logits.

    Fix: the original applied nn.Softmax at the end of forward(), but the
    training loop feeds the output into nn.CrossEntropyLoss, which applies
    log-softmax internally — softmax-ing twice flattens the gradients and
    hurts training. forward() now returns raw logits; argmax-based
    predictions are unchanged because softmax is monotonic.
    """

    def __init__(self):
        super().__init__()
        # Vocabulary size 411, embedding dim 200 — presumably matches the
        # number of rows in vocab.txt; TODO confirm.
        self.embedding = nn.Embedding(411, 200)
        self.lstm = nn.LSTM(input_size=200, hidden_size=256, num_layers=3,
                            dropout=0.2, batch_first=True)
        self.full_connect1 = nn.Linear(256, 100)
        self.full_connect2 = nn.Linear(100, 5)

    def forward(self, x):
        """Map (batch, seq_len) Long token indices to (batch, 5) logits."""
        embedded = self.embedding(x)
        lstm_out, _ = self.lstm(embedded)
        # Keep only the output of the last time step of the top layer.
        last_step = lstm_out[:, -1, :]
        return self.full_connect2(self.full_connect1(last_step))


def train():
    """Train the classifier for 10 epochs, printing eval accuracy every 20 steps,
    then save the weights via save_model().

    Fixes over the original:
      * the eval tensor is built once outside the loop (was rebuilt every
        20 steps) and pinned to int64 for nn.Embedding;
      * evaluation runs under torch.no_grad() with model.eval() so dropout
        is disabled and no gradient graph is accumulated.
    """
    train_data_loader, eval_x, eval_y = load_data()
    model = TextClassification()

    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
    # CrossEntropyLoss expects raw logits and integer class labels.
    loss_func = nn.CrossEntropyLoss()

    # Build the evaluation tensor once; Embedding requires Long indices.
    eval_tensor = torch.from_numpy(np.asarray(eval_x, dtype=np.int64))

    for epoch in range(10):
        for step, (b_x, b_y) in enumerate(train_data_loader):
            output = model(b_x)
            loss = loss_func(output, b_y)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if step % 20 == 0:
                # Evaluate with dropout off and without tracking gradients.
                model.eval()
                with torch.no_grad():
                    test_output = model(eval_tensor)
                model.train()
                pred_y = torch.max(test_output, 1)[1].numpy()
                accuracy = float((pred_y == eval_y).astype(int).sum()) / float(eval_y.size)
                print('Epoch: ', epoch, '| train loss: %.4f' % loss.item(), '| test accuracy: %.2f' % accuracy)
    save_model(model)


# 4、保存模型
def save_model(model):
    """Persist the model's learned parameters (state_dict) to 'model.pkl'."""
    state = model.state_dict()
    torch.save(state, 'model.pkl')


if __name__ == '__main__':
    # Entry point: run the full training pipeline when executed as a script.
    train()
