import os
import torch
import copy
from torch.utils.data import DataLoader
from model import TextCNN
import torch.optim as optim
import torch.nn as nn
from torch.autograd import Variable
import pickle
from dataset import *


def adjust_learning_rate(optimizer, epoch, learning_rate):
    """Step-decay the learning rate: divide by 10 every 10 epochs.

    Mutates every param group of *optimizer* in place and returns the
    same optimizer for call-chaining convenience.
    """
    decayed = learning_rate * 0.1 ** (epoch // 10)
    for group in optimizer.param_groups:
        group["lr"] = decayed
    return optimizer


def get_vocab(file_path):
    """Load the pickled training data at *file_path* and return its
    ``cnt2word`` vocabulary mapping."""
    with open(file_path, "rb") as fh:
        return pickle.load(fh)["cnt2word"]


def get_feature_vocab_size(file_path):
    """Return the combined size of all side-feature id dictionaries
    (product, category, adv_id, industry) stored in the pickle at
    *file_path*."""
    with open(file_path, "rb") as fh:
        data = pickle.load(fh)
    keys = ("product_dict", "category_dict", "adv_id_dict", "industry_dict")
    return sum(len(data[k]) for k in keys)




def trainer():
    """Train a TextCNN classifier on the pickled Tencent ad dataset.

    Reads the training data from hard-coded paths, builds the model,
    and runs the full training loop, printing per-epoch loss/accuracy
    to stdout. Returns nothing.
    """
    # Select GPU 0 when available, otherwise fall back to CPU.
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print(device)

    # Training configuration.
    epochs = 50
    batch_size = 1024 * 16
    learning_rate = 0.01

    # Model configuration.
    embedding_dim = 64
    hidden_dim = 64
    filter_sizes = [2, 3, 5]
    num_filters = 3
    nlabel = 10

    # Hard-coded dataset locations (cluster NFS mount).
    h5_train_pos = "/home/datanfs/macong_data/tencent_data/train_preliminary/train_data/h5train.pkl"
    aid_dict_pos = "/home/datanfs/macong_data/tencent_data/" \
                   "train_preliminary/train_data/h5aid.pkl"

    print("### load dataset ###")
    word2cnt = get_vocab(h5_train_pos)
    vocab_size = len(word2cnt)
    feature_vocab_size = get_feature_vocab_size(aid_dict_pos)
    print("### vocab size", vocab_size)  # fixed log typo ("cocab")

    train_ = Mdataset(h5_train_pos, aid_dict_pos)
    train_loader = DataLoader(train_, batch_size=batch_size, shuffle=True, drop_last=True)

    # Create the model and move it to the target device.
    print("### create model ###")
    model = TextCNN(vocab_size, feature_vocab_size, embedding_dim, hidden_dim,
                    filter_sizes, num_filters, nlabel)
    model = model.to(device)

    optimizer = optim.Adam(model.parameters(), lr=learning_rate)
    criterion = nn.CrossEntropyLoss()

    # Training loop.
    print("### start training ###")
    for epoch in range(epochs):
        train_running_loss, train_acc = 0.0, 0.0
        optimizer = adjust_learning_rate(optimizer, epoch, learning_rate)

        # `step` instead of `iter` — avoid shadowing the builtin.
        for step, traindata in enumerate(train_loader):
            train_inputs, train_labels, input_feature = traindata
            train_labels = torch.squeeze(train_labels)

            # `.to(device)` is a no-op on CPU, so no separate GPU branch is
            # needed; torch.autograd.Variable is deprecated since PyTorch 0.4.
            train_inputs = train_inputs.to(device)
            train_labels = train_labels.to(device)
            input_feature = input_feature.to(device)

            # BUG FIX: gradients must be cleared on every step. The original
            # called zero_grad() once per epoch, so each update used gradients
            # accumulated over all previous batches of that epoch.
            optimizer.zero_grad()
            y_pred = model(train_inputs, input_feature)
            loss = criterion(y_pred, train_labels)
            loss.backward()
            optimizer.step()

            # .item() detaches and unpacks the scalar loss.
            train_running_loss += loss.item()
            train_acc += model.get_accuracy(y_pred, train_labels, batch_size)

        print("epoch {}, loss {:.2f}, acc {:.2f}".format(
            epoch, train_running_loss, train_acc / len(train_loader)))


def evaluate(model, data_loader, criterion, batch_size, use_gpu=True, device=0):
    """Run one evaluation pass over *data_loader* and print loss/accuracy.

    Args:
        model: network exposing ``forward(inputs, src_len)`` and
            ``get_accuracy(pred, labels, batch_size)``.
        data_loader: yields ``(inputs, labels, src_len)`` batches.
        criterion: loss function applied to (predictions, labels).
        batch_size: batch size, forwarded to ``model.get_accuracy``.
        use_gpu: when True, move each batch to *device*.
        device: target device for the batches (default 0 = first GPU).
    """
    # BUG FIX: switch to inference mode so dropout/batch-norm layers
    # behave deterministically during evaluation (was commented out).
    model.eval()
    epoch_loss = 0.0
    eval_acc = 0.0
    with torch.no_grad():
        # `batch_idx` instead of `iter` — avoid shadowing the builtin.
        for batch_idx, eval_data in enumerate(data_loader):
            eval_inputs, eval_labels, src_len = eval_data
            eval_labels = torch.squeeze(eval_labels)
            src_len = torch.squeeze(src_len)

            # torch.autograd.Variable is deprecated; plain `.to(device)`
            # suffices, and on CPU the original left labels/src_len
            # unmoved anyway.
            if use_gpu:
                eval_inputs = eval_inputs.to(device)
                eval_labels = eval_labels.to(device)
                src_len = src_len.to(device)

            # Permute [batch, seq_len] -> [seq_len, batch] as the model
            # expects -- TODO confirm against model.forward.
            eval_inputs = eval_inputs.permute(1, 0)
            y_pred = model(eval_inputs, src_len)
            epoch_loss += criterion(y_pred, eval_labels).item()
            eval_acc += model.get_accuracy(y_pred, eval_labels, batch_size)

        print("eval phrase, loss {:.2f}, acc {:.2f}".format(epoch_loss, eval_acc / len(data_loader)))


def trainIters():
    """Placeholder for an iteration-based training entry point.

    Not implemented yet; always raises ``NotImplementedError``.
    """
    raise NotImplementedError


if __name__ == '__main__':
    # Script entry point: run the full training procedure.
    trainer()

