import pandas as pd
from nltk.corpus import stopwords
import re
import nltk
from nltk.stem.porter import PorterStemmer
from tqdm import tqdm
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score, accuracy_score, f1_score
from sklearn.preprocessing import MinMaxScaler, Normalizer
import numpy as np


def process_en(s, remove_stop=True):
    """Clean, tokenize, and Porter-stem an English string.

    :param s: raw text
    :param remove_stop: drop NLTK English stopwords when True
    :return: list of stemmed, lowercase tokens
    """
    s = re.sub(r"([?.!,])", r" ", s)  # drop common sentence punctuation
    s = re.sub(r"\s+", " ", s)  # collapse runs of whitespace
    s = re.sub(r"[^a-zA-Z?.!,¿]+", " ", s)  # keep letters only (punct already removed above)
    tokens = s.lower().strip().split()
    if remove_stop:
        stops = set(stopwords.words("english"))
        tokens = [w for w in tokens if w not in stops]
    # Stemming; other options: Lancaster, Snowball, etc.
    porter = PorterStemmer()
    return [porter.stem(w) for w in tokens]


# nltk.download('punkt')
# tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
# # we dun need tokenizing here
def tokenizing(s):
    """Split *s* into sentences using the punkt tokenizer.

    NOTE(review): relies on the module-level ``tokenizer`` defined in the
    commented-out lines above; calling this as-is raises NameError. The
    function is unused in the current pipeline (see comment above).
    """
    raw_sentences = tokenizer.tokenize(s)
    return raw_sentences


def get_frequency_dict(df, label):
    """Count word occurrences over all samples of one class.

    :param df: entire dataset (df_train or df_test); needs 'label' and
        'text' columns, where 'text' holds lists of tokens
    :param label: class whose samples to count (0 negative, 1 positive)
    :return: tuple (word -> count dict, list of the distinct words)
    """
    dic = {}
    subset = df[df['label'] == label]
    for text in subset['text']:
        for w in text:
            dic[w] = dic.get(w, 0) + 1
    return dic, list(dic.keys())


def load_data():
    """Read the pre-split IMDB train/test CSVs (first column is the index).

    :return: tuple (df_train, df_test)
    """
    return (
        pd.read_csv('data/df_imda8k_train.csv', index_col=0),
        pd.read_csv('data/df_imda8k_test.csv', index_col=0),
    )


def data_processing():
    """Load the IMDB splits, clean/stem the text, and build frequency features.

    Features per document: summed positive-class word frequency, summed
    negative-class word frequency, token count, and their ratio.

    :return: (train_x, train_y, test_x, test_y) as numpy arrays
    """
    def summed_frequency(word_list, dic):
        # Sum each *distinct* word's class frequency; words never seen in
        # the class score -1 (a small penalty, kept from the original).
        seen = set()  # set: O(1) membership vs the original O(n) list scan
        total = 0
        for word in word_list:
            if word not in seen:
                total += dic.get(word, -1)
                seen.add(word)
        return total

    nltk.download('stopwords')
    df_train, df_test = load_data()
    df_train['text'] = df_train['raw_text'].apply(process_en)
    df_test['text'] = df_test['raw_text'].apply(process_en)
    # Class frequency dictionaries are built from the training split only.
    dic_neg, _ = get_frequency_dict(df_train, 0)
    dic_pos, _ = get_frequency_dict(df_train, 1)
    feature_cols = ['pos_fre', 'neg_fre', 'len', 'pos/neg']
    for df in (df_train, df_test):
        df['pos_fre'] = df['text'].apply(lambda ws: summed_frequency(ws, dic_pos))
        df['neg_fre'] = df['text'].apply(lambda ws: summed_frequency(ws, dic_neg))
        df['len'] = df['text'].apply(len)
        df['pos/neg'] = df['pos_fre'] / df['neg_fre']
    train_x, train_y = df_train[feature_cols].values, df_train['label'].values
    test_x, test_y = df_test[feature_cols].values, df_test['label'].values
    return train_x, train_y, test_x, test_y


def hyper_tuning(train_x=None, train_y=None, test_x=None, test_y=None):
    """Grid-search logistic-regression C, then tune the decision threshold.

    Backward compatible: any argument left as None falls back to the
    module-level array of the same name (the original global coupling).

    NOTE(review): model and threshold selection both use the test set
    (data leakage); a validation split would be sounder — kept as-is to
    preserve the existing pipeline.

    :return: (best_threshold, chosen_C, fitted_model, best_accuracy, f1)
    """
    env = globals()
    train_x = env['train_x'] if train_x is None else train_x
    train_y = env['train_y'] if train_y is None else train_y
    test_x = env['test_x'] if test_x is None else test_x
    test_y = env['test_y'] if test_y is None else test_y

    Cs = [0.001, 0.01, 0.03, 0.1, 0.3, 1, 3, 10, 30, 100, 300, 1000]
    models = [LogisticRegression(C=c, max_iter=1000, multi_class='ovr') for c in Cs]
    scores = []
    for model in models:
        model.fit(train_x, train_y)
        # ROC-AUC must be computed from probabilities/scores, not the hard
        # labels that predict() returns.
        scores.append(roc_auc_score(test_y, model.predict_proba(test_x)[:, 1]))
    best = int(np.argmax(scores))
    chosen_c, chosen_model = Cs[best], models[best]

    proba = chosen_model.predict_proba(test_x)[:, 1]  # invariant across thresholds
    thresholds = np.arange(0.05, 0.95, 0.05)
    acc_scores = [accuracy_score(test_y, proba > t) for t in thresholds]
    best_idx = int(np.argmax(acc_scores))
    best_t = thresholds[best_idx]
    fscore = f1_score(test_y, proba > best_t)
    return best_t, chosen_c, chosen_model, acc_scores[best_idx], fscore


if __name__ == '__main__':
    # hyper_tuning() reads train_x/train_y/test_x/test_y from module
    # globals, so these assignments must stay at module scope.
    train_x, train_y, test_x, test_y = data_processing()
    _, _, model, acc_score,fscore = hyper_tuning()
    print(f"Logistic function accuracy: {acc_score}, f1 score: {fscore}")

