import os
import sys
import pandas as pd
import numpy as np
import torch
from torch.utils.data import DataLoader, TensorDataset
from torch import nn
from data_process import *


# Load the training and evaluation data.
def load_data(batch_size=32):
    """Build the training DataLoader plus raw evaluation arrays.

    Reads 'parent_data.csv' (expects 'question' and 'label' columns —
    TODO confirm schema against the CSV), splits it with split_data, and
    converts each question into a fixed-length sequence of vocab indices.

    :param batch_size: mini-batch size for the training DataLoader.
    :return: (train_data_loader, eval_x, eval_y) — the eval split stays
             as numpy arrays so train() can score it in one forward pass.
    """
    df = pd.read_csv('parent_data.csv')
    train_df, eval_df = split_data(df)

    # Training split: token-index sequences padded/truncated to a fixed length.
    train_x = padding_seq(train_df['question'].apply(seq2index))
    # CrossEntropyLoss requires int64 class targets.
    train_y = np.asarray(train_df['label'], dtype=np.int64)

    # .long() guards against padding_seq emitting a float array
    # (np.concatenate on an empty row upcasts to float64).
    train_data_set = TensorDataset(torch.from_numpy(train_x).long(), torch.from_numpy(train_y))
    train_data_loader = DataLoader(dataset=train_data_set, batch_size=batch_size, shuffle=True)

    # Evaluation split is returned un-batched.
    eval_x = padding_seq(eval_df['question'].apply(seq2index))
    eval_y = np.asarray(eval_df['label'])
    return train_data_loader, eval_x, eval_y


# Convert a question string into a list of vocabulary indices.
def seq2index(seq):
    """Tokenize *seq* and map each token to its row index in vocab.txt.

    Unknown tokens map to the '[UNK]' index.

    :param seq: raw question string.
    :return: list of int vocabulary indices.
    """
    # This function is applied once per DataFrame row; re-reading
    # vocab.txt on every call is O(rows * vocab) of redundant I/O, so
    # the dictionary is built once and cached on the function object.
    dictionary = getattr(seq2index, '_vocab', None)
    if dictionary is None:
        vocab = pd.read_csv('vocab.txt', header=None)
        dictionary = dict(zip(vocab[0].values, vocab.index.values))
        seq2index._vocab = dictionary
    unk = dictionary['[UNK]']
    return [dictionary.get(token, unk) for token in tokenize(seq)]


# Normalize every sequence to the same length.
def padding_seq(X, max_len=15):
    """Pad (with 0) or truncate each index sequence to exactly max_len.

    :param X: iterable of integer index sequences (lists/arrays).
    :param max_len: target length of every row.
    :return: int64 ndarray of shape (len(X), max_len).
    """
    rows = []
    for x in X:
        row = list(x)[:max_len]
        row.extend([0] * (max_len - len(row)))
        rows.append(row)
    # Fix: the previous np.concatenate form yielded float64 for empty
    # input rows, which upcast the whole batch and broke the downstream
    # nn.Embedding lookup; force int64 explicitly.
    return np.array(rows, dtype=np.int64)


def one_hot(y, nb_classes=5):
    """One-hot encode integer class labels.

    :param y: iterable of non-negative integer labels.
    :param nb_classes: number of classes; any falsy value means
                       "infer as max(y) + 1".
    :return: float ndarray of shape (len(y), nb_classes).
    """
    labels = np.asarray(y, dtype='int16')
    if not nb_classes:
        nb_classes = np.max(labels) + 1
    # Row i of the identity matrix is exactly the one-hot vector for i.
    return np.eye(nb_classes)[labels]


# 3. Build the model.
class TextClassification(nn.Module):
    """Bidirectional-LSTM sentence classifier.

    Pipeline: embedding -> stacked BiLSTM -> linear head on the last
    time step.  forward() returns *raw logits*: the previous version
    applied nn.Softmax() here, but train() scores the output with
    nn.CrossEntropyLoss, which already applies log-softmax internally —
    softmax-before-cross-entropy distorts the loss and flattens
    gradients.  Returning logits is compatible with both the loss and
    the argmax-based accuracy computation.
    """

    def __init__(self, vocab_size, embedding_dim, hidden_size, num_layers, dropout, target_size):
        """
        :param vocab_size: number of rows in the embedding table.
        :param embedding_dim: embedding vector width.
        :param hidden_size: LSTM hidden size per direction.
        :param num_layers: number of stacked LSTM layers.
        :param dropout: inter-layer LSTM dropout probability.
        :param target_size: number of output classes.
        """
        super().__init__()
        self.vocab_size = vocab_size
        self.embedding_dim = embedding_dim
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.dropout = dropout
        self.target_size = target_size
        self.embedding = nn.Embedding(vocab_size, embedding_dim)
        self.lstm = nn.LSTM(input_size=embedding_dim, hidden_size=hidden_size,
                            num_layers=num_layers, dropout=dropout,
                            batch_first=True, bidirectional=True)
        # Bidirectional => LSTM output features are 2 * hidden_size wide.
        self.linear = nn.Linear(hidden_size * 2, target_size)

    def forward(self, x):
        """Map a (batch, seq_len) LongTensor of token ids to (batch, target_size) logits."""
        embedded = self.embedding(x)       # (batch, seq_len, embedding_dim)
        lstm_out, _ = self.lstm(embedded)  # (batch, seq_len, 2 * hidden_size)
        # Classify from the final time step's concatenated hidden states.
        return self.linear(lstm_out[:, -1, :])


def train(vocab_size, embedding_dim, hidden_size, num_layers, dropout, target_size):
    """Train the BiLSTM classifier for 50 epochs and save it.

    Periodically scores the held-out eval split and prints loss/accuracy.
    Parameters mirror TextClassification's constructor.
    """
    train_data_loader, eval_x, eval_y = load_data()
    model = TextClassification(vocab_size, embedding_dim, hidden_size, num_layers, dropout, target_size)

    loss_func = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
    # Convert the eval features once, outside the loop; the embedding
    # layer requires a LongTensor.
    eval_x_tensor = torch.from_numpy(eval_x).long()
    for epoch in range(50):
        for step, (b_x, b_y) in enumerate(train_data_loader):
            # Re-enable dropout after any eval pass below.
            model.train()
            output = model(b_x.long())
            # CrossEntropyLoss requires int64 class-index targets.
            loss = loss_func(output, b_y.long())
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if step % 20 == 0:
                # Fix: evaluate with dropout disabled and without
                # building an autograd graph over the whole eval set.
                model.eval()
                with torch.no_grad():
                    test_output = model(eval_x_tensor)
                pred_y = torch.max(test_output, 1)[1].numpy()
                accuracy = float((pred_y == eval_y).astype(int).sum()) / float(eval_y.size)
                print('Epoch: ', epoch, '| train loss: %.4f' % loss.item(), '| test accuracy: %.2f' % accuracy)

    save_model(model)


# 4. Persist the trained model.
def save_model(model):
    """Pickle the entire model object to 'model.pkl'.

    NOTE(review): torch.save on a whole nn.Module pickles class import
    paths, so loading requires the identical source layout; saving
    model.state_dict() instead would be more portable.
    """
    torch.save(model, 'model.pkl')

if __name__ == '__main__':
    # Hyper-parameters spelled out by name for readability.
    train(vocab_size=411, embedding_dim=128, hidden_size=256,
          num_layers=2, dropout=0.2, target_size=4)
