import pandas as pd
from nltk.corpus import stopwords
import re
import nltk
from nltk.stem.porter import PorterStemmer
from tqdm import tqdm
from sklearn.metrics import accuracy_score, f1_score
import numpy as np


def process_en(s, remove_stop=True):
    """
    Tokenize, clean and stem an English sentence.

    :param s: raw text string
    :param remove_stop: drop NLTK English stopwords when True
    :return: list of lowercased, Porter-stemmed tokens
    """
    s = re.sub(r"([?.!,])", r" ", s)  # replace sentence punctuation with spaces
    s = re.sub(r'[" "]+', " ", s)  # collapse runs of double quotes / spaces
    # Everything that is not a letter (the ?.!,¿ were already spaced out above,
    # except ¿ which this class preserves) becomes a space.
    s = re.sub(r"[^a-zA-Z?.!,¿]+", " ", s)
    tokens = s.lower().strip().split()
    if remove_stop:
        stops = set(stopwords.words("english"))
        tokens = [w for w in tokens if w not in stops]
    porter = PorterStemmer()  # other stemmers: Lancaster, Snowball, etc.
    return [porter.stem(w) for w in tokens]


def get_frequency_dict(df, label):
    """
    Count word occurrences over all samples carrying the given label.

    :param df: DataFrame with 'label' and 'text' (list-of-token) columns
    :param label: class to select (0 = negative, 1 = positive)
    :return: (word -> count dict, list of the distinct words)
    """
    counts = {}
    for tokens in df.loc[df['label'] == label, 'text']:
        for w in tokens:
            counts[w] = counts.get(w, 0) + 1
    return counts, list(counts)


def load_data():
    """Load the pre-split IMDb-8k train/test CSVs (first CSV column is the index)."""
    paths = ('data/df_imda8k_train.csv', 'data/df_imda8k_test.csv')
    train, test = (pd.read_csv(p, index_col=0) for p in paths)
    return train, test


def data_processing():
    """
    Load, clean and feature-engineer the train/test splits.

    Side effects: downloads the NLTK stopword corpus and reads the two CSVs.

    :return: (df_train, df_test, dic_neg, neg_words, dic_pos, pos_words)
    """
    def _distinct_frequency_sum(word_list, freq_dict):
        # Sum the training frequency of each *distinct* word in the sample;
        # words unseen in training contribute -1 (an out-of-vocabulary penalty).
        # A set replaces the original O(n^2) visited-list scan.
        return sum(freq_dict.get(w, -1) for w in set(word_list))

    nltk.download('stopwords')
    df_train, df_test = load_data()
    df_train['text'] = df_train['raw_text'].apply(process_en)
    df_test['text'] = df_test['raw_text'].apply(process_en)
    dic_neg, neg_words = get_frequency_dict(df_train, 0)
    dic_pos, pos_words = get_frequency_dict(df_train, 1)
    for df in (df_train, df_test):
        df['pos_fre'] = df['text'].apply(_distinct_frequency_sum, args=(dic_pos,))
        df['neg_fre'] = df['text'].apply(_distinct_frequency_sum, args=(dic_neg,))
    df_train['len'] = df_train['text'].apply(len)
    df_train['pos/neg'] = df_train['pos_fre'] / df_train['neg_fre']
    return df_train, df_test, dic_neg, neg_words, dic_pos, pos_words


def compute_lambda_table(dic_neg, neg_words, dic_pos, pos_words, df_test=None):
    """
    Build the per-word log-likelihood-ratio ("lambda") table for naive Bayes.

    :param dic_neg: word -> count over negative training samples
    :param neg_words: distinct negative-class words
    :param dic_pos: word -> count over positive training samples
    :param pos_words: distinct positive-class words
    :param df_test: DataFrame with a 'text' column of token lists; its
        out-of-vocabulary words get lambda 0. When omitted, falls back to the
        module-level ``df_test`` global (legacy behavior of this script).
    :return: (log_prior, table) where table is indexed by word with columns
        'pos', 'neg', 'lambda'.
    """
    if df_test is None:
        # Backward compatibility: the original implementation read this global.
        df_test = globals()['df_test']
    vocab = sorted(set(neg_words) | set(pos_words))
    v = len(vocab)
    sum_pos = sum(dic_pos.values())
    sum_neg = sum(dic_neg.values())
    # Laplace (add-one) smoothing: P(w|c) = (count + 1) / (total + |V|).
    # The original divided by the length of the *partially built* frequency
    # dict, which made every probability depend on set-iteration order.
    table = pd.DataFrame(
        {
            'pos': [(dic_pos.get(w, 0) + 1) / (sum_pos + v) for w in vocab],
            'neg': [(dic_neg.get(w, 0) + 1) / (sum_neg + v) for w in vocab],
        },
        index=vocab,
    )
    table['lambda'] = np.log(table['pos'] / table['neg'])
    # With proper add-one smoothing each probability column sums to 1, so this
    # ratio term is log(1) = 0; kept to preserve the return interface.
    log_prior = np.log(table['pos'].sum() / table['neg'].sum())
    table.loc['OOV', 'lambda'] = 0
    # Words that appear only in the test set carry no evidence: lambda 0.
    test_vocab = {w for tokens in df_test['text'] for w in tokens}
    unseen = sorted(test_vocab - set(vocab))
    if unseen:
        oov_rows = pd.DataFrame(0.0, columns=['pos', 'neg', 'lambda'], index=unseen)
        table = pd.concat([table, oov_rows])
    return log_prior, table


def naive_bayes_predict(df, freq_table=None, prior=None):
    """
    Predict a boolean label (True = positive) for each sample in df.

    A sample is positive when the sum of its words' lambda scores plus the
    log prior exceeds 0. Words repeated in a sample are counted each time,
    matching the original ``.loc`` lookup semantics.

    :param df: DataFrame with a 'text' column of token lists
    :param freq_table: DataFrame indexed by word with a 'lambda' column;
        defaults to the module-level ``word_freq_table`` (legacy behavior).
    :param prior: scalar log prior; defaults to module-level ``log_prior``.
    :return: Series of booleans aligned with df
    """
    global word_freq_table, log_prior
    if freq_table is None:
        freq_table = word_freq_table
    if prior is None:
        prior = log_prior
    lambdas = freq_table['lambda']

    def _single_pred(tokens):
        # reindex + fill_value=0 instead of chained .loc[...]['lambda']:
        # one lookup, and words missing from the table score 0 rather than
        # raising KeyError.
        return lambdas.reindex(tokens, fill_value=0.0).sum() + prior > 0

    return df['text'].apply(_single_pred)


if __name__ == '__main__':
    # End-to-end driver: preprocess -> build lambda table -> predict -> score.
    # NOTE(review): several of these names are load-bearing module globals --
    # compute_lambda_table reads `df_test` and naive_bayes_predict reads
    # `word_freq_table` / `log_prior` -- so they must not be renamed or moved
    # into a function without updating those callees.
    df_train, df_test, dic_neg, neg_words, dic_pos, pos_words = data_processing()
    log_prior, word_freq_table = compute_lambda_table(dic_neg, neg_words, dic_pos, pos_words)
    y_pred = naive_bayes_predict(df_test)
    test_y = df_test.label
    acc_score = accuracy_score(test_y, y_pred)
    fscore = f1_score(test_y, y_pred)
    print(f"Naive Bayes accuracy: {acc_score}, f1 score: {fscore}")
