import re
import sys
import numpy as np
from collections import Counter, defaultdict


def gen_training_corpus(raw_data):
    """
    Parse raw "word\ttag" lines into an ordered list of (word, tag) tuples.

    Lines without a tab (sentence boundaries) become the ("UNK", "SOS")
    marker pair; punctuation-only entries (whose tag field cleans to a
    single space) are dropped.
    """
    pairs = []
    for line in raw_data:
        # A line with no tab is a sentence boundary: substitute the marker.
        if len(line.split('\t')) == 1:
            line = "UNK\tSOS\n"
        # Collapse every run of non-word, non-tab characters to one space.
        cleaned = re.sub(r"[^\w\t]+", " ", line)
        # Keep only entries whose tag field survived cleaning.
        if cleaned.split('\t')[1] != " ":
            pairs.append(tuple(cleaned[:-1].split('\t')))
    return pairs


def load_data(path='data/WSJ_02-21.pos'):
    """Read the tagged corpus file and return its raw lines (newlines kept)."""
    with open(path, 'r') as handle:
        lines = handle.readlines()
    return lines


def gen_dicts(training_corpus):
    """
    Tally the three count tables an HMM tagger needs.

    :param training_corpus: ordered list of (word, tag) tuples
    :return: (emission_counts, transition_counts, tag_counts) where
             emission_counts maps (tag, lowercased word) -> count,
             transition_counts maps (previous tag, tag) -> count, and
             tag_counts maps tag -> count. The chain starts from 'SOS'.
    """
    emission_counts = defaultdict(int)
    transition_counts = defaultdict(int)
    tag_counts = defaultdict(int)
    previous = 'SOS'
    for token, tag in training_corpus:
        lowered = token.lower()
        tag_counts[tag] += 1
        emission_counts[(tag, lowered)] += 1
        transition_counts[(previous, tag)] += 1
        previous = tag
    return emission_counts, transition_counts, tag_counts


def gen_emission_matrix(tag_counts, emission_counts, vocab, alpha):
    """
    Build the add-alpha smoothed emission probability matrix B.

    :param tag_counts: dict mapping tag -> total occurrence count
    :param emission_counts: dict mapping (tag, word) -> count
    :param vocab: dict mapping word -> column index (insertion order used)
    :param alpha: Laplace smoothing constant
    :return: B of shape (num_tags, num_words) with
             B[i, j] = P(word_j | tag_i), tags in sorted order.
    """
    all_tags = sorted(tag_counts.keys())
    word_list = list(vocab.keys())
    num_tags = len(all_tags)
    num_words = len(word_list)
    B = np.zeros((num_tags, num_words))
    for i, tag in enumerate(all_tags):
        # The denominator only depends on the tag: hoist it out of the
        # inner loop instead of recomputing it per word.
        denom = tag_counts[tag] + alpha * num_words
        for j, word in enumerate(word_list):
            # .get avoids inserting zero entries when the caller passes
            # a defaultdict (the original guarded this with a set lookup).
            count = emission_counts.get((tag, word), 0)
            B[i, j] = (count + alpha) / denom
    return B


def gen_transition_matrix(tag_counts, transition_counts, alpha=0.001):
    """
    Build the add-alpha smoothed transition probability matrix A.

    :param tag_counts: dict mapping tag -> total occurrence count
    :param transition_counts: dict mapping (prev_tag, tag) -> count
    :param alpha: Laplace smoothing constant
    :return: A of shape (num_tags, num_tags) with
             A[i, j] = P(tag_j | tag_i), tags in sorted order.
    """
    all_tags = sorted(tag_counts.keys())
    num_tags = len(all_tags)
    A = np.zeros((num_tags, num_tags))
    for i, prev_tag in enumerate(all_tags):
        # The denominator only depends on the previous tag: compute once
        # per row instead of once per cell.
        denom = tag_counts[prev_tag] + alpha * num_tags
        for j, tag in enumerate(all_tags):
            # .get replaces the original set-membership check + lookup and
            # leaves a defaultdict argument unmodified.
            count = transition_counts.get((prev_tag, tag), 0)
            A[i, j] = (count + alpha) / denom
    return A


def gen_verbite_data(path='data/WSJ_02-21.pos'):
    """
    Build every component needed for Viterbi decoding from a tagged corpus.

    :param path: tab-separated word/tag training file
    :return: (t2t_matrix, t2w_matrix, vocab, t_counts) where
             t2t_matrix is the smoothed transition matrix,
             t2w_matrix is the smoothed emission matrix,
             vocab maps word -> column index, and
             t_counts maps tag -> count.
    """
    alpha = 0.001
    word_tag = load_data(path)
    training_corpus = gen_training_corpus(word_tag)
    # Vocabulary keeps only words seen at least twice, in first-seen order.
    word_count = Counter(word.lower() for word, _ in training_corpus)
    kept = [w for w in word_count if word_count[w] >= 2]
    vocab = {word: idx for idx, word in enumerate(kept)}
    t2w_counts, t2t_counts, t_counts = gen_dicts(training_corpus)
    # BUG FIX: the transition matrix must be built from the transition
    # counts; the original passed t2w_counts (emission counts), so every
    # (prev_tag, tag) lookup missed and the matrix collapsed to the
    # uniform smoothed value alpha / (count_tag + alpha * num_tags).
    t2t_matrix = gen_transition_matrix(t_counts, t2t_counts, alpha)
    t2w_matrix = gen_emission_matrix(t_counts, t2w_counts, vocab, alpha)
    return t2t_matrix, t2w_matrix, vocab, t_counts


def init_viterbi(tag_counts, t2t_arr, t2w_arr, seq, vocab):
    """
    Allocate and seed the Viterbi tables for the first token of *seq*.

    Column 0 of the probability table gets log P(tag | SOS) + log P(word | tag);
    a zero transition probability maps to -inf instead of log(0).
    """
    states = sorted(tag_counts.keys())
    num_tags = len(states)
    num_cols = len(seq)
    best_probs = np.zeros((num_tags, num_cols))
    best_paths = np.zeros((num_tags, num_cols), dtype=int)
    start = states.index("SOS")
    first_word = vocab[seq[0]]
    for tag_idx in range(num_tags):
        if t2t_arr[start][tag_idx] == 0:
            # log(0) is undefined; mark the state as unreachable.
            best_probs[tag_idx, 0] = float('-inf')
        else:
            best_probs[tag_idx, 0] = (np.log(t2t_arr[start, tag_idx])
                                      + np.log(t2w_arr[tag_idx, first_word]))
    return best_probs, best_paths


def viterbi_forward(t2t_arr, t2w_arr, input_seq, C, D, vocab):
    """
    Fill the Viterbi probability table C and backpointer table D in place.

    :param t2t_arr: transition probability matrix (prev tag x tag)
    :param t2w_arr: emission probability matrix (tag x word)
    :param input_seq: list of tokens, all present in vocab
    :param C: log-probability table, column 0 already initialised
    :param D: backpointer table, same shape as C
    :param vocab: dict mapping word -> emission column index
    :return: (C, D) filled for columns 1..len(input_seq)-1
    """
    num_tags = C.shape[0]
    # Take logs once up front; the original called np.log on the same
    # matrix entries inside the innermost loop.
    log_A = np.log(t2t_arr)
    log_B = np.log(t2w_arr)
    for i in range(1, len(input_seq)):
        # Invariant over both j and k: hoisted out of the tag loops.
        word_idx = vocab[input_seq[i]]
        for j in range(num_tags):
            emit = log_B[j, word_idx]  # invariant over k
            best_prob_i = float('-inf')
            best_path_i = None
            for k in range(num_tags):
                prob = C[k, i - 1] + emit + log_A[k, j]
                # >= keeps the original tie-breaking (last best k wins).
                if prob >= best_prob_i:
                    best_prob_i = prob
                    best_path_i = k
            C[j, i] = best_prob_i
            D[j, i] = best_path_i
    return C, D


def viterbi_backward(C, D, tag_list):
    """
    Trace the highest-probability tag sequence back through table D.

    :param C: filled log-probability table (tags x words)
    :param D: filled backpointer table, same shape
    :param tag_list: sorted tag names indexing the rows of C/D
    :return: list of predicted tag names, one per input word
    """
    n_words = D.shape[1]
    n_tags = C.shape[0]
    z = [None] * n_words
    pred = [None] * n_words
    # Locate the best state for the final word; strict > keeps the first
    # row that attains the maximum.
    best_final = float('-inf')
    for state, score in enumerate(C[:, n_words - 1]):
        if score > best_final:
            best_final = score
            z[n_words - 1] = state
    pred[n_words - 1] = tag_list[z[n_words - 1]]
    # Walk the backpointers from the last word to the first.
    for i in range(n_words - 1, 0, -1):
        z[i - 1] = D[z[i], i]
        pred[i - 1] = tag_list[z[i - 1]]
    return pred


def run_viterbi(seq_temp, tag_count, t_vocab, t2t_arr, t2w_arr):
    """Decode one token sequence: initialise tables, fill them forward,
    then backtrace the best tag path."""
    tags = sorted(tag_count.keys())
    best_probs, best_paths = init_viterbi(tag_count, t2t_arr, t2w_arr,
                                          seq_temp, t_vocab)
    best_probs, best_paths = viterbi_forward(t2t_arr, t2w_arr, seq_temp,
                                             best_probs, best_paths, t_vocab)
    return viterbi_backward(best_probs, best_paths, tags)


if __name__ == "__main__":
    # Train once, then tag sentences typed on stdin until an empty line.
    t2t_arr, t2w_arr, vocab, tag_count = gen_verbite_data()
    print('Enter sentence:')
    while True:
        line = sys.stdin.readline()
        line = re.sub(r"[^\w ]+", "", line)
        if not line:
            break
        tokens = line.lower().split()
        print(tokens)
        tags = run_viterbi(tokens, tag_count, vocab, t2t_arr, t2w_arr)
        print(tags)
