import os
import sys
import pandas as pd
import numpy as np
import torch
from torch.utils.data import DataLoader, TensorDataset
from torch import nn
from data_process import *
import torch.nn.functional as F
from utility.import_log import flog
log = flog(__file__)

# Load training and evaluation data.
def load_data(batch_size=32):
    """Read parent_data.csv, split into train/eval sets, and return
    (train DataLoader, eval_x ndarray, eval_y ndarray).

    :param batch_size: mini-batch size for the training DataLoader.
    """
    df = pd.read_csv('parent_data.csv')
    train_df, eval_df = split_data(df)

    # Tokenize -> index -> pad. Force int64 so torch.from_numpy yields a
    # LongTensor on every platform (numpy's default int is int32 on Windows,
    # which nn.CrossEntropyLoss rejects for targets).
    train_x = padding_seq(train_df['question'].apply(seq2index)).astype(np.int64)
    train_y = np.asarray(train_df['label'], dtype=np.int64)

    train_data_set = TensorDataset(torch.from_numpy(train_x), torch.from_numpy(train_y))
    train_data_loader = DataLoader(dataset=train_data_set, batch_size=batch_size, shuffle=True)

    # Eval split stays as numpy arrays; train() converts/evaluates in bulk.
    eval_x = padding_seq(eval_df['question'].apply(seq2index)).astype(np.int64)
    eval_y = np.asarray(eval_df['label'], dtype=np.int64)

    return train_data_loader, eval_x, eval_y


# Lazily-built token -> row-index map; reading the vocab CSV once instead of
# once per sample (seq2index is applied per row over the whole dataset).
_VOCAB = None


# Convert a text sequence into vocabulary indices.
def seq2index(seq):
    """Tokenize *seq* and map each token to its row index in vocab.txt.

    Out-of-vocabulary tokens map to the index of the '[UNK]' entry.

    :param seq: raw text string (passed to tokenize()).
    :return: list of int vocabulary indices, one per token.
    """
    global _VOCAB
    if _VOCAB is None:
        vocab = pd.read_csv('vocab.txt', header=None)
        _VOCAB = dict(zip(vocab[0].values, vocab.index.values))
    unk = _VOCAB['[UNK]']
    return [_VOCAB.get(token, unk) for token in tokenize(seq)]


# Pad / truncate every sequence to a uniform length.
def padding_seq(X, max_len=15):
    """Right-pad (with 0) or truncate each sequence in *X* to *max_len*.

    :param X: iterable of int sequences (lists or 1-D arrays).
    :param max_len: target length of every row.
    :return: int64 ndarray of shape (len(X), max_len) — int64 is pinned so
             torch.from_numpy produces a LongTensor on every platform.
    """
    rows = [
        list(x[:max_len]) + [0] * (max_len - min(len(x), max_len))
        for x in X
    ]
    # reshape keeps the (0, max_len) shape correct for an empty input.
    return np.array(rows, dtype=np.int64).reshape(-1, max_len)


def one_hot(y, nb_classes=5):
    """One-hot encode integer labels.

    :param y: iterable of non-negative integer class labels.
    :param nb_classes: number of classes; pass a falsy value (0/None) to
        infer it as max(y) + 1.
    :return: float ndarray of shape (len(y), nb_classes).
    """
    labels = np.asarray(y, dtype='int16')
    if not nb_classes:
        nb_classes = labels.max() + 1
    encoded = np.zeros((labels.shape[0], nb_classes))
    encoded[np.arange(labels.shape[0]), labels] = 1.
    return encoded


# 3. Build the model (TextCNN-style sentence classifier).
class TextClassification(nn.Module):
    """Convolutional text classifier: embedding -> parallel Conv2d windows
    -> ReLU + global max-pool -> dropout -> linear.

    forward() returns RAW LOGITS. The original applied nn.Softmax before
    nn.CrossEntropyLoss, which is a bug: CrossEntropyLoss applies
    log_softmax internally, so softmax-ing first flattens the gradients.
    argmax over logits equals argmax over softmax, so prediction code is
    unaffected.
    """

    def __init__(self, vocab_size, embedding_dim, kernel_num, kernel_size, stride, dropout, target_size):
        super().__init__()
        self.vocab_size = vocab_size
        self.embedding_dim = embedding_dim
        self.kernel_num = kernel_num
        self.kernel_size = kernel_size
        self.stride = stride  # kept for interface compatibility; unused by the conv layers
        self.dropout_p = dropout  # keep the raw probability; self.dropout below is the module
        self.target_size = target_size
        self.embedding = nn.Embedding(self.vocab_size, self.embedding_dim)
        # One Conv2d per window size k; each consumes (batch, 1, seq_len, embed_dim).
        self.convs = nn.ModuleList(
            [nn.Conv2d(1, self.kernel_num, (k, self.embedding_dim)) for k in self.kernel_size])
        self.dropout = nn.Dropout(self.dropout_p)
        self.fc = nn.Linear(self.kernel_num * len(self.kernel_size), self.target_size)
        # Available for probability output at inference time; NOT applied in
        # forward() — see class docstring.
        self.softmax = nn.Softmax(dim=1)

    def relu_and_pool(self, x, conv):
        """Conv -> ReLU -> global max-pool over time; returns (batch, kernel_num)."""
        x = F.relu(conv(x)).squeeze(3)
        x = F.max_pool1d(x, x.size(2)).squeeze(2)
        return x

    # Forward pass.
    def forward(self, x):
        """:param x: (batch, seq_len) long token indices.
        :return: (batch, target_size) raw logits."""
        out = self.embedding(x)  # (b_size, seq_len, embed_dim)
        out = out.unsqueeze(1)  # (b_size, 1, seq_len, embed_dim)
        out = torch.cat([self.relu_and_pool(out, conv) for conv in self.convs], 1)
        out = self.dropout(out)
        out = self.fc(out)
        return out  # logits; pair with nn.CrossEntropyLoss


def train(vocab_size, embedding_dim, kernel_num, kernel_size, stride, dropout, target_size, batch_size):
    """Build the model, run 20 epochs of Adam/CrossEntropy training with
    periodic held-out accuracy logging, then persist the model.

    :raises: re-raises any exception from the training loop after logging it.
    """
    train_data_loader, eval_x, eval_y = load_data(batch_size)
    model = TextClassification(vocab_size, embedding_dim, kernel_num, kernel_size, stride, dropout, target_size)
    log.info('模型结构,超参设置完成')
    loss_func = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
    # The eval set never changes — convert it to a tensor once, not every 20 steps.
    eval_tensor = torch.from_numpy(eval_x)
    log.info('模型开始迭代运算')
    try:
        for epoch in range(20):
            for step, (b_x, b_y) in enumerate(train_data_loader):
                output = model(b_x)
                loss = loss_func(output, b_y)
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

                if step % 20 == 0:
                    # Evaluate with dropout disabled and without building an
                    # autograd graph; restore train mode afterwards.
                    model.eval()
                    with torch.no_grad():
                        test_output = model(eval_tensor)
                    model.train()
                    pred_y = torch.max(test_output, 1)[1].numpy()
                    accuracy = float((pred_y == eval_y).astype(int).sum()) / float(eval_y.size)
                    log.info(f"'Epoch: ', {epoch}, '| train loss:' {format(loss.item(), '.4f')}, '| test accuracy:' {format(accuracy,'.2f')}")
    except Exception as e:
        log.error(f'模型训练失败:{e}')
        raise  # bare raise preserves the original traceback
    save_model(model)
    log.info('模型保存成功')


# 4. Persist the trained model.
def save_model(model):
    """Serialize the full model object (architecture + weights) to
    model_cnn.pkl in the working directory.

    NOTE(review): torch.save on a whole module pickles the class by
    reference — loading requires the identical class to be importable.
    Saving state_dict() is the more refactor-proof alternative.
    """
    model_path = 'model_cnn.pkl'
    torch.save(model, model_path)

if __name__ == '__main__':
    # Hyper-parameters spelled out by name for readability; values unchanged.
    train(
        vocab_size=411,
        embedding_dim=128,
        kernel_num=100,
        kernel_size=[3, 4, 5],
        stride=1,
        dropout=0.2,
        target_size=5,
        batch_size=50,
    )
