import math
import sys
import time

import gensim
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence, PackedSequence
# import torch.utils.data as data
# import random

# import codecs
# codecs.register_error("strict", codecs.replace_errors)


EMBEDDING_SIZE = 300
LR = 0.01
NUM_LAYERS = 1
BATCH_SIZE = 200
EPOCH = 10
BIDIRECTIONAL = True

torch.manual_seed(11)
torch.cuda.manual_seed(11)


def word_vectors_to_tensor(word_vectors):
    # print(word_vectors)
    t = torch.Tensor(word_vectors)
    t.resize_((len(word_vectors), 1, EMBEDDING_SIZE))
    return t


class LSTM(nn.Module):
    def __init__(self, input_size, hidden_size, output_size):
        super(LSTM, self).__init__()
        self.lstm = nn.LSTM(input_size=input_size, hidden_size=hidden_size, bias=True, num_layers=NUM_LAYERS,
                            dropout=0.5, bidirectional=True)

        if BIDIRECTIONAL:
            hid_size = hidden_size // 2
        else:
            hid_size = hidden_size

        self.linear = nn.Linear(hid_size, output_size, bias=True)
        self.softmax = F.softmax

    def forward(self, x):
        h1p, state = self.lstm(x)  # RNN
        h3 = self.linear(h1p.data)  # linear transform

        # create PackedSequence (pytorch docs: Instances of this class should never be created manually.
        #  They are meant to be instantiated by functions like pack_padded_sequence().
        h3p = PackedSequence(h3, h1p.batch_sizes)

        h4, lens = pad_packed_sequence(h3p)  # unpack
        index = torch.LongTensor(lens)
        batch_index = torch.LongTensor(range(0, len(lens)))
        h5 = h4[index.cuda() - 1, batch_index.cuda(), :]  # slice last element
        return self.softmax(h5)


def category_from_output(all_category_list, output):
    top_n, top_i = output.data.topk(1)  # Tensor out of Variable with .data
    category_i = top_i[0][0]
    return all_category_list[category_i], category_i


def category_from_batch_output(all_category_list, output):
    top_n, top_i = output.data.topk(1)  # Tensor out of Variable with .data
    return [all_category_list[i[0]] for i in top_i.numpy().tolist()], top_i


def get_word_vec(w2v_model, vocab_dict, word):
    if word in vocab_dict:
        # print(type(w2v_model[word]))
        return w2v_model[word]
    else:
        return None


def load_data(w2v_model, all_category_list=None, in_file=None):
    if all_category_list is None:
        all_category_list = []
    vocab = w2v_model.vocab
    vocab_dict = dict(zip(vocab, len(vocab) * [1]))
    i = 0
    with open(in_file, encoding='utf-8') as data_fd:
        for l in data_fd:
            d = l.strip().split('\t')
            if len(d) != 2:
                continue

            vec_list = [v.tolist() for v in
                        [get_word_vec(w2v_model, vocab_dict, w) for w in d[1].split()]
                        if v is not None]
            if not vec_list:
                continue

            t = all_category_list.index(d[0])
            yield i, vec_list, t
            i += 1


def padding(vec, max_len, size):
    length = len(vec)
    diff = max_len - length
    for _ in range(diff):
        vec.append(np.zeros(size).tolist())
    # print('padding: ', vec, '\n')
    return vec


def rearrange_data(vx, tx, lens, size):
    max_len = max(lens)
    # lens mast be sort in decreasing order
    sorted_lens = sorted(zip(lens, range(len(lens))), key=lambda x: x[0], reverse=True)
    lens = [i for i, _ in sorted_lens]
    index = [i for _, i in sorted_lens]
    vx = [padding(vx[d], max_len, size) for d in index]  # padding

    tx = [tx[d] for d in index]
    return vx, tx, lens


def get_batch_data(w2v_model, all_category_list=None, in_file=None):
    data_iter = load_data(w2v_model, all_category_list, in_file)
    # ix = []
    vx = []
    tx = []
    lens = []
    for x, (i, v, t) in enumerate(data_iter):
        vx.append(v)
        tx.append(t)
        lens.append(len(v))
        if (x + 1) % BATCH_SIZE == 0:
            # lens mast be sort in decreasing order
            vx, tx, lens = rearrange_data(vx, tx, lens, EMBEDDING_SIZE)
            yield np.array(vx, dtype=np.float32), np.array(tx, dtype=np.int64), lens
            vx, tx, lens = [], [], []

    vx, tx, lens = rearrange_data(vx, tx, lens, EMBEDDING_SIZE)
    yield np.array(vx, dtype=np.float32), np.array(tx, dtype=np.int64), lens


def time_since(since):
    now = time.time()
    s = now - since
    m = math.floor(s / 60)
    s -= m * 60
    return '%dm %ds' % (m, s)


def main():
    word2vec_model_file = sys.argv[1]  # gensim word2vec model file
    data_file = sys.argv[2]
    out_model_file = sys.argv[3]

    n_categories = 2
    n_hidden = 128
    rnn = LSTM(EMBEDDING_SIZE, n_hidden, n_categories)
    rnn = rnn.cuda()

    loss_func = nn.CrossEntropyLoss()

    print_every = 100
    current_loss = 0
    # all_losses = []

    start = time.time()

    all_categories = ['1', '5']

    word2vec_model = gensim.models.Word2Vec.load(word2vec_model_file)

    print(time_since(start))

    opt = optim.Adam(rnn.parameters(), lr=LR)
    for epoch in range(EPOCH):
        N = 0
        correct_num = 0

        data_iter = get_batch_data(word2vec_model, all_categories, data_file)
        for i, (b_x, b_y, lens) in enumerate(data_iter):
            b_x = np.transpose(b_x, (1, 0, 2))  # time step first

            b_x = Variable(torch.from_numpy(b_x).cuda())
            b_y = Variable(torch.from_numpy(b_y).cuda())

            b_x = pack_padded_sequence(b_x, lens)
            # print('seq: ', b_x, '\n')

            opt.zero_grad()
            output = rnn(b_x)
            loss = loss_func(output, b_y)
            loss.backward()
            torch.nn.utils.clip_grad_norm(rnn.parameters(), 0.5)  # add this line to prevent nan loss
            opt.step()

            current_loss += loss.data[0]

            guesses, guesses_i = category_from_batch_output(all_categories, output.cpu())

            categories = [all_categories[k] for k in b_y.data.cpu().numpy()]

            N += len(categories)
            correct_num += sum(np.array(guesses) == np.array(categories))

            if i % print_every == 0:
                print(time_since(start), ' epoch: ', epoch, ' iter: ', i, ' loss: ', current_loss,
                      ' accuracy: %.4f', correct_num / N)
                current_loss = 0
                for g, c in zip(guesses[0:20], categories[0:20]):
                    if g == c:
                        s = '^'
                    else:
                        s = 'x'
                    print(s, ' predict: ', g, ' actual: ', c)

    torch.save(rnn.state_dict(), out_model_file)


if __name__ == '__main__':
    import sys

    main()
