import json
from pprint import pprint

import numpy as np
import pandas as pd
from tensorflow.python.keras import layers
# from tensorflow.python.keras.legacy_tf_layers.core import legacy_tf_layers

import config
import tensorflow as tf
import tensorflow_addons as tfa

from utils import collate_fn, Dataset


def get_vocab_id():
    """Load the vocabulary CSV and return (ordered word list, word -> id dict)."""
    frame = pd.read_csv(config.VOCAB_PATH, names=['word', 'id'])
    word_list = list(frame['word'])
    word_to_id = dict(frame.values)
    return word_list, word_to_id


def get_label_id():
    """Load the label CSV and return (ordered label list, label -> id dict)."""
    frame = pd.read_csv(config.LABEL_PATH, names=['label', 'id'])
    label_list = list(frame['label'])
    label_to_id = dict(frame.values)
    return label_list, label_to_id


class Dataset():
    """Sentence-level NER dataset read from a space-separated word/label file.

    Sentences are separated by blank lines in the raw file; each sample is
    a (token_id list, label_id list) pair padded to a fixed length so that
    samples can be stacked into a rectangular tensor.

    NOTE(review): this definition shadows the ``Dataset`` imported from
    ``utils`` at the top of the file — confirm which one callers expect.
    """

    # Fixed per-sample sequence length. Every sample is padded (and, as a
    # robustness fix, truncated) to exactly this many tokens.
    MAX_LEN = 69

    def __init__(self, train: bool = True, base_len=50):
        super(Dataset, self).__init__()

        # NOTE(review): base_len is stored but never used — padding uses
        # MAX_LEN. Kept only for interface compatibility with callers.
        self.base_len = base_len
        self.path = config.TRAIN_PATH if train else config.TEST_PATH

        # read_csv skips the blank separator lines, so the dataframe holds
        # one row per (word, label) token.
        self.data = pd.read_csv(self.path, sep=' ', names=['word', 'label'])

        _, self.vocab2id = get_vocab_id()
        _, self.label2id = get_label_id()

        # cut_point[i]:cut_point[i+1] is the dataframe row slice of sentence i.
        self.cut_point = [0]
        self.count = 0
        self.get_address_point_size()

    def __getitem__(self, index):
        """Return (token_ids, label_ids) for sentence *index*, length MAX_LEN.

        Unknown words map to config.WORD_UNK_ID; unknown labels map to the
        'O' label id. Raises IndexError past the last sentence, which also
        terminates plain ``for x, y in dataset`` iteration.
        """
        address = self.data.iloc[self.cut_point[index]:self.cut_point[index + 1], :]
        address_record = [self.vocab2id.get(word, config.WORD_UNK_ID) for word in address['word']]
        target = [self.label2id.get(label, self.label2id['O']) for label in address['label']]

        # Bug fix: a sentence longer than MAX_LEN previously produced a
        # negative pad_len and a ragged, un-stackable sample; truncate first.
        address_record = address_record[:self.MAX_LEN]
        target = target[:self.MAX_LEN]

        pad_len = self.MAX_LEN - len(address_record)
        address_record += [config.WORD_PAD_ID] * pad_len
        target += [config.LABEL_O_ID] * pad_len

        return address_record, target

    def __len__(self):
        """Number of sentences (blank-line-separated blocks) in the file."""
        return self.count

    def get_address_point_size(self):
        """Scan the raw file for blank lines and record sentence boundaries.

        Raw line index minus the number of blank lines seen so far equals
        the dataframe row index (read_csv drops blank lines), so each value
        appended to cut_point is a boundary in dataframe coordinates.
        """
        with open(self.path, encoding='utf-8') as file:
            file_iter = file.readlines()

        for index, word in enumerate(file_iter):
            if word == '\n':
                self.cut_point.append(index - self.count)
                self.count += 1


class BiLSTMCRF(tf.keras.Model):
    """BiLSTM-CRF sequence tagger.

    ``call`` returns the mean CRF negative log-likelihood (a scalar loss)
    when ``training`` is truthy, otherwise the per-token emission logits
    of shape (batch, seq_len, tag_size).
    """

    def __init__(self, vocab_size, tag_size, embedding_dim, lstm_units):
        super(BiLSTMCRF, self).__init__()
        # mask_zero=True: token id 0 is padding and is masked downstream.
        self.embedding = tf.keras.layers.Embedding(
            vocab_size,
            embedding_dim,
            mask_zero=True
        )
        # Bug fix: the lstm_units argument was silently ignored in favour
        # of config.HIDDEN_SIZE; honour the constructor parameter.
        self.bi_lstm = tf.keras.layers.Bidirectional(
            tf.keras.layers.LSTM(lstm_units, return_sequences=True)
        )
        self.dense = tf.keras.layers.Dense(tag_size)
        # Learnable CRF transition matrix (tag -> tag scores).
        self.transition_params = tf.Variable(
            tf.random.uniform(shape=(tag_size, tag_size))
        )

        # CRF layer
        self.crf = tfa.text.crf_log_likelihood

    def call(self, inputs, targets=None, training=None):
        """Forward pass; see class docstring for the training/inference split."""
        x = self.embedding(inputs)
        x = self.bi_lstm(x)
        x = self.dense(x)

        if training:
            # Bug fix: pass the learnable transition matrix explicitly.
            # The original omitted transition_params, so crf_log_likelihood
            # created a fresh (untrained) matrix on every call and the
            # reassignment clobbered the tf.Variable, cutting it out of the
            # gradient path entirely.
            log_likelihood, _ = self.crf(
                x,
                targets,
                sequence_lengths=self.compute_sequence_length(inputs),
                transition_params=self.transition_params
            )
            loss = tf.reduce_mean(-log_likelihood)
            return loss
        else:
            return x

    def compute_sequence_length(self, inputs):
        """Count non-zero (non-padding) token ids per sequence as int32."""
        length = tf.reduce_sum(tf.sign(tf.abs(inputs)), axis=-1)
        return tf.cast(length, tf.int32)


def get_inputs(train=True):
    """Materialize one dataset split as (inputs, targets) tensors.

    Args:
        train: truthy selects the training split, falsy the test split.

    Returns:
        Two tensors: stacked token-id samples and stacked label-id samples.
    """
    inputs = []
    target = []
    # Idiom fix: the original `train = True if train else False` was a
    # redundant coercion; bool(train) expresses the same intent directly.
    for x, y in Dataset(bool(train)):
        inputs.append(x)
        target.append(y)
    return tf.convert_to_tensor(inputs), tf.convert_to_tensor(target)


if __name__ == '__main__':
    vocab_size = config.VOCAB_SIZE
    tag_size = config.TARGET_SIZE
    batch_size = config.BATCH_SIZE
    embedding_dim = config.EMBEDDING_DIM
    lstm_units = config.LSTM_UNITS

    # Collect exactly one batch of padded samples from the training split.
    inputs_array = []
    targets_array = []
    for data, label in Dataset(train=True):
        inputs_array.append(list(data))
        targets_array.append(list(label))
        # Bug fix: the original `if index == batch_size: break` collected
        # batch_size + 1 samples (indices 0..batch_size inclusive).
        if len(inputs_array) == batch_size:
            break
    inputs = tf.convert_to_tensor(inputs_array, dtype=tf.int32)

    targets = tf.convert_to_tensor(targets_array, dtype=tf.int32)
    # Clamp any out-of-range label ids into the valid tag range.
    targets = tf.clip_by_value(targets, 0, tag_size - 1)

    model = BiLSTMCRF(vocab_size, tag_size, embedding_dim, lstm_units)
    # Bug fix: the optimizer was re-created inside the epoch loop, which
    # discarded Adam's moment estimates on every step; create it once.
    optimizer = tf.keras.optimizers.Adam(learning_rate=config.LR)
    for epoch in range(10):
        with tf.GradientTape() as tape:
            loss = model(inputs, targets, training=True)
        gradients = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(gradients, model.trainable_variables))
        print(f"Epoch {epoch + 1}, Loss: {loss.numpy()}")

    print("Training finished.")
