# -*- coding: utf-8 -*-
import re
import sys

import torch
import math
import os
import numpy as np
import pandas as pd
import random
from torch.utils.data import DataLoader
from torch import nn
import time
from matplotlib import pyplot as plt

# Global tensor dtype and compute device used throughout the script.
dtype = torch.float32
# NOTE(review): hard-codes CUDA device 0 — the script will fail on a CPU-only
# machine; consider falling back to "cpu" when CUDA is unavailable.
device = torch.device("cuda:0")


def gen_dict(data):
    """Build word/tag <-> id lookup tables from the token-level DataFrame.

    Args:
        data: DataFrame with 'Word' and 'Tag' columns.

    Returns:
        (vocab2id, id2vocab, tag2id, id2tag). Id 0 is reserved for '<PAD>'
        in every table; real words/tags start at id 1.
    """
    # sorted() makes the id assignment deterministic across runs. Iterating a
    # bare set() depends on string-hash randomization, so a model checkpoint
    # saved in one process would map to different ids when reloaded later.
    vocab = sorted(set(data['Word'].apply(lambda w: w.lower())))
    vocab2id = {word: i for i, word in enumerate(vocab, start=1)}
    vocab2id['<PAD>'] = 0
    id2vocab = {i: word for word, i in vocab2id.items()}
    tags = sorted(set(data['Tag']))
    tag2id = {t: i for i, t in enumerate(tags, start=1)}
    tag2id['<PAD>'] = 0
    id2tag = {i: t for t, i in tag2id.items()}
    return vocab2id, id2vocab, tag2id, id2tag


def tokenize(raw_x, raw_y, vocab2id, tag2id, split):
    """Convert separator-joined sentence/tag strings into id sequences.

    Every element of raw_x / raw_y ends with the separator token, so the
    trailing separator is stripped before splitting. Words are lower-cased
    to match the vocabulary; tags are looked up as-is.
    """
    tail = len(split)
    x = []
    for sentence in raw_x:
        words = sentence[:-tail].lower().split(split)
        x.append([vocab2id[word] for word in words])
    y = []
    for tag_line in raw_y:
        tags = tag_line[:-tail].split(split)
        y.append([tag2id[tag] for tag in tags])
    return x, y


def padding(x, y, max_len, pad=0):
    """Right-pad every sequence in x and y to max_len with the pad id.

    Args:
        x: list of token-id lists.
        y: list of tag-id lists (parallel to x).
        max_len: target length; every sequence must be <= max_len.
        pad: padding id to fill with (default 0).

    Returns:
        Two (len(x), max_len) integer numpy arrays.
    """
    num_lines = len(x)
    new_x = np.full((num_lines, max_len), pad)
    new_y = np.full((num_lines, max_len), pad)
    # Only the real tokens need writing — the arrays are pre-filled with
    # `pad`. (The original appended literal 0s to the tail, silently
    # ignoring the `pad` argument, and padded y by x's length.)
    for i in range(num_lines):
        new_x[i, :len(x[i])] = x[i]
        new_y[i, :len(y[i])] = y[i]
    return new_x, new_y


def gen_dataset(data, vocab2id, tag2id, pad_index=0):
    """Turn the token-level DataFrame into a list of (x, y) tensor pairs.

    Sentences are reassembled by concatenating words/tags per 'Sentence #'
    with an '<end>' separator, converted to id sequences, and right-padded
    to the longest sentence in the data.

    Args:
        data: DataFrame with 'Sentence #', 'Word' and 'Tag' columns.
        vocab2id / tag2id: lookup tables from gen_dict().
        pad_index: id used for padding positions.

    Returns:
        List of (token_ids, tag_ids) LongTensor pairs, one per sentence.
    """
    split = '<end>'
    # Work on a copy so the caller's DataFrame is not mutated with the
    # temporary Word_/Tag_ helper columns.
    frame = data.copy()
    frame['Word_'] = frame['Word'] + split
    frame['Tag_'] = frame['Tag'] + split
    # String "sum" concatenates all words/tags of a sentence into one string.
    grouped = frame[['Sentence #', 'Word_', 'Tag_']].groupby('Sentence #').sum()
    raw_x = grouped['Word_'].to_list()
    raw_y = grouped['Tag_'].to_list()
    x, y = tokenize(raw_x, raw_y, vocab2id, tag2id, split)
    max_len = max(len(s) for s in x)
    x_train, y_train = padding(x, y, max_len, pad=pad_index)
    return [
        (torch.tensor(x_train[i, :], dtype=torch.long),
         torch.tensor(y_train[i, :], dtype=torch.long))
        for i in range(len(x))
    ]


def gen_dataloader(batch_size, shuffle=True, pad_index=0, path="data/ner_dataset.csv"):
    """Load the NER csv and build vocab tables plus train/test DataLoaders.

    Args:
        batch_size: batch size for both loaders.
        shuffle: whether the loaders shuffle each epoch.
        pad_index: padding id forwarded to gen_dataset().
        path: csv with 'Sentence #', 'Word', 'Tag' columns.

    Returns:
        (vocab2id, id2vocab, tag2id, id2tag, train_dataloader, test_dataloader)
    """
    data = pd.read_csv(path, encoding="ISO-8859-1")
    # 'Sentence #' is only present on the first token of each sentence;
    # forward-fill propagates it. (fillna(method=...) is deprecated.)
    data = data.ffill()
    vocab2id, id2vocab, tag2id, id2tag = gen_dict(data)
    dataset = gen_dataset(data, vocab2id, tag2id, pad_index=pad_index)  # list of (x, y)
    # Hold out 10% for evaluation. The original sliced these the wrong way
    # round, training on 10% of the data and evaluating on the other 90%.
    split_index = int(0.1 * len(dataset))
    test_dataset = dataset[:split_index]
    train_dataset = dataset[split_index:]
    train_dataloader = DataLoader(train_dataset, shuffle=shuffle, batch_size=batch_size)
    test_dataloader = DataLoader(test_dataset, shuffle=shuffle, batch_size=batch_size)
    return vocab2id, id2vocab, tag2id, id2tag, train_dataloader, test_dataloader


class NER(torch.nn.Module):
    """BiLSTM sequence tagger with a residual connection over the embedding.

    Input: (batch, seq_len) token ids. Output: (batch, seq_len, num_tag)
    log-probabilities. seq_len must equal max_len because the LayerNorm is
    shaped over the whole (max_len, d_model) activation.
    """

    def __init__(self, vocab_size, d_model, num_tag, max_len=104):
        """
        Args:
            vocab_size: number of word ids (including the pad id 0).
            d_model: embedding / model width (must be even).
            num_tag: number of output tag classes.
            max_len: fixed sequence length (was previously hard-coded to 104).
        """
        super().__init__()
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.Embedding = nn.Embedding(vocab_size, d_model, padding_idx=0)
        # batch_first=True: inputs are (batch, seq, feature). Without it,
        # nn.LSTM treats dim 0 as time, so the original recurred over the
        # batch dimension instead of over the tokens.
        self.LSTM = nn.LSTM(d_model, d_model // 2, num_layers=2,
                            bidirectional=True, batch_first=True)
        self.Dense = nn.Linear(d_model, num_tag)
        self.activation = nn.LogSoftmax(dim=-1)
        self.norm = nn.LayerNorm((max_len, d_model))
        self.relu = nn.ReLU()

    def forward(self, inputs):
        x = self.Embedding(inputs)
        x_rnn, _ = self.LSTM(x)
        x_rnn = self.relu(x_rnn)
        x = x + x_rnn  # residual connection around the BiLSTM
        x = self.norm(x)
        x = self.Dense(x)
        return self.activation(x)


def count_correct(y_pred, y_true, pad_index=0):
    """Return batch_size * (token accuracy over non-pad positions).

    The callers accumulate this over batches and divide by the dataset size,
    which yields a batch-size-weighted mean accuracy.

    Args:
        y_pred: (batch, seq, num_tag) scores or log-probabilities.
        y_true: (batch, seq) gold tag ids.
        pad_index: tag id treated as padding and excluded from the count.
    """
    mask = (y_true != pad_index)
    num_tokens = mask.sum().item()
    if num_tokens == 0:
        # An all-padding batch would otherwise divide by zero.
        return 0.0
    batch_size = y_true.shape[0]
    correct = (mask & (y_pred.argmax(2) == y_true)).type(torch.float).sum().item()
    return batch_size * correct / num_tokens


def test_epoch(dataloader, model, criterion=None):
    """Evaluate the model over a dataloader.

    Args:
        dataloader: yields (X, y) batches of token/tag id tensors.
        model: the NER model.
        criterion: loss function; defaults to the module-level ``loss_fn``
            for backward compatibility (train_epoch takes its loss as a
            parameter — this makes the two functions consistent).

    Returns:
        (mean loss per sample, batch-size-weighted token accuracy).
    """
    if criterion is None:
        criterion = loss_fn  # module-level global, preserving old behavior
    size = len(dataloader.dataset)
    model.eval()
    test_loss, correct = 0, 0
    with torch.no_grad():
        for X, y in dataloader:
            X, y = X.to(device), y.to(device)
            pred = model(X)
            test_loss += criterion(pred.view(-1, pred.shape[-1]), y.view(-1)).item()
            correct += count_correct(pred, y, 0)
    return test_loss / size, correct / size


def train_epoch(dataloader, model, loss_fn, optimizer, lr_scheduler=None):
    """Run one training epoch.

    Args:
        dataloader: yields (X, y) batches of token/tag id tensors.
        model: the NER model.
        loss_fn: loss applied to flattened (tokens, num_tag) predictions.
        optimizer: optimizer stepped once per batch.
        lr_scheduler: optional LR scheduler stepped once per epoch; defaults
            to the module-level ``scheduler`` for backward compatibility
            (the original reached for the global directly).

    Returns:
        (mean loss per sample, batch-size-weighted token accuracy).
    """
    model.train()
    size = len(dataloader.dataset)
    losses = 0.0
    acc = 0.0
    for X, y in dataloader:
        X, y = X.to(device), y.to(device)
        pred = model(X)
        optimizer.zero_grad()
        loss = loss_fn(pred.view(-1, pred.shape[-1]), y.view(-1))
        acc += count_correct(pred, y)
        loss.backward()
        optimizer.step()
        losses += loss.item()
    if lr_scheduler is None:
        lr_scheduler = scheduler  # module-level global, preserving old behavior
    lr_scheduler.step()
    return losses / size, acc / size


def Mytest(raw_x, model, max_len=104):
    """Tag one tokenized sentence and print its non-'O' predictions.

    Uses the module-level ``vocab2id``, ``id2tag`` and ``device``.

    Args:
        raw_x: list of (lower-cased) word tokens.
        model: trained NER model.
        max_len: the model's fixed sequence length; input is truncated to
            this and right-padded with the pad id 0.
    """
    global device
    # Truncate over-long input — the model's LayerNorm only accepts max_len
    # (the original produced a shape mismatch here).
    raw_x = raw_x[:max_len]
    # Map out-of-vocabulary words to the pad id 0 instead of raising
    # KeyError on arbitrary interactive input.
    ids = [vocab2id.get(word, 0) for word in raw_x]
    ids += [0] * (max_len - len(ids))
    x = torch.tensor(ids).unsqueeze(0)
    model.eval()
    with torch.no_grad():
        pred = model(x.to(device))
    y_pred = pred.argmax(2)
    raw_y = [id2tag[tag_id] for tag_id in y_pred[0, :len(raw_x)].tolist()]
    print(raw_x)
    print(raw_y)
    for word, tag in zip(raw_x, raw_y):
        if tag != 'O':
            print(f"{word}: {tag}")


def train(model, epochs, verbose=1, log_count=5):
    """Train for ``epochs`` epochs, logging every ``log_count`` epochs.

    Uses the module-level train/test dataloaders, ``loss_fn`` and
    ``optimizer``.

    Args:
        model: the NER model.
        epochs: number of epochs to run.
        verbose: print progress when truthy.
        log_count: log every this many epochs.
    """
    total_time = 0.0
    for t in range(epochs):
        start_time = time.time()
        train_loss, train_acc = train_epoch(train_dataloader, model, loss_fn, optimizer)
        val_loss, val_acc = test_epoch(test_dataloader, model)
        total_time += time.time() - start_time
        if t % log_count == log_count - 1 and verbose:
            # Report a true running average; the original printed only the
            # last epoch's duration under the label "Average Epoch Time".
            print(f"Epoch: {t + 1}, Train loss: {train_loss:.4f},acc: {train_acc:.3f}, Val Loss: {val_loss:.2f}"
                  f" acc: {val_acc:.3f}, Average Epoch Time: {total_time / (t + 1):.3f}s")


def test_loop(model):
    """Interactively tag sentences from stdin until a blank line or EOF.

    Each line is stripped of everything except word characters, spaces and
    dots, lower-cased, whitespace-split and passed to Mytest().
    """
    for line in sys.stdin:
        cleaned = re.sub(r"[^\w .]+", "", line)
        if cleaned == "":
            break
        Mytest(cleaned.lower().split(), model, max_len=104)


if __name__ == "__main__":
    # Hyper-parameters.
    batch_size = 32
    pad_index = 0  # id 0 is '<PAD>' in both the word and tag vocabularies
    vocab2id, id2vocab, tag2id, id2tag, train_dataloader, test_dataloader = gen_dataloader(batch_size)
    vocab_size = len(vocab2id)
    d_model = 128  # embedding / model width
    num_tag = len(tag2id)
    model = NER(vocab_size, d_model, num_tag).to(device)
    # Padding positions are excluded from the loss via ignore_index.
    loss_fn = nn.CrossEntropyLoss(ignore_index=pad_index)
    optimizer = torch.optim.Adam(model.parameters(), lr=0.02)
    # Exponential LR decay: multiplier 0.97 ** (epoch / 2), stepped once per
    # epoch from inside train_epoch().
    scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer=optimizer,
                                                  lr_lambda=lambda epoch: 0.97 ** (epoch / 2))
    train(model, epochs=100, verbose=1)
    torch.save(model.state_dict(), "model.pth")
    # Interactive tagging loop on stdin.
    test_loop(model)
