# coding: utf-8
import pandas as pd
import numpy as np
import re
import string
import nltk
from sklearn.utils import shuffle
from bs4 import BeautifulSoup
from nltk.stem import LancasterStemmer, WordNetLemmatizer
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from collections import Counter
from collections import defaultdict
import multiprocessing
from multiprocessing import Pool

def strip_html(text):
    """Strip HTML markup and escaped entity remnants from *text*.

    BeautifulSoup extracts the visible text; the follow-up regexes mop
    up tag fragments and entities that survive malformed markup.
    All patterns are raw strings — the original used ``'\\&lt[;]'`` /
    ``'\\&gt[;]'``, whose ``\\&`` is an invalid string escape (a
    SyntaxWarning on modern Python); the matched text is unchanged.
    """
    soup = BeautifulSoup(text, "html.parser")
    text = soup.get_text()
    # Remove any complete tags left behind by malformed markup.
    r = re.compile(r'<[^>]+>', re.S)
    text = r.sub('', text)
    # Literal "&nbsp;" entities become plain spaces.
    text = re.sub(r'&(nbsp;)', ' ', text)
    # Drop an unterminated trailing tag fragment (e.g. "<div class=").
    text = re.sub(r'<[^>]+', '', text)
    # Escaped angle brackets ("&lt;" / "&gt;") become spaces.
    text = re.sub(r'&lt[;]', ' ', text)
    text = re.sub(r'&gt[;]', ' ', text)
    return text

def remove_punctuation(text):
    """Replace punctuation with spaces and drop tokens that vanish.

    Non-ASCII bytes are blanked first, then every ASCII punctuation
    character becomes a space.  ``re.escape`` makes the character class
    safe: the original ``"[" + string.punctuation + "]"`` contained the
    raw sequence ``[\\]``, whose ``\\]`` escaped the closing bracket, so
    a literal backslash was never matched.  Finally the text is
    tokenized and any token reduced to nothing by stripping residual
    non-word characters is discarded.
    """
    # Blank out non-ASCII characters.
    text = re.sub(r'[^\x00-\x7f]', r' ', text)
    # re.escape guarantees ']', '\\' and '^' are treated literally.
    text = re.sub("[" + re.escape(string.punctuation) + "]", " ", text)
    new_words = []
    for word in word_tokenize(text):
        cleaned = re.sub(r'[^\w\s]', '', word)
        if cleaned != '':
            new_words.append(cleaned)
    return " ".join(new_words)

def remove_non_ascii(text):
    """Keep only tokens built entirely of lowercase ASCII letters,
    digits and common punctuation; drop every other token."""
    # Matches any character OUTSIDE the allowed set; a token with at
    # least one such character is discarded wholesale.
    bad_char = re.compile(
        r'[^a-z0-9\,\.\?\:\;\"\'\[\]\{\}\=\+\-\_\)\(\^\&\$\%\#\@\!\`\~ ]')
    kept = [w for w in word_tokenize(text) if not bad_char.findall(w)]
    return " ".join(kept)

def remove_others(text):
    """Collapse URLs, e-mail addresses, phone-like runs and digit runs
    into the placeholder tokens 'spamurl', 'email', 'phone', 'digit'.

    Substitution order matters: URLs go first so an address embedded in
    a URL is not re-substituted, and digits are rewritten last.
    (The original interleaved bare triple-quoted strings as "comments";
    those were no-op expression statements and are now real comments.)
    """
    # URLs -> ' spamurl '.  The original pattern contained the literal
    # HTML entity '&amp;' (a copy-paste artifact from escaped source);
    # the intended character is plain '&'.
    text = re.sub(r'(http|ftp|https):\/\/[\w\-_]+(\.[\w\-_]+)+([\w\-\.,@?^=%&:/~\+#]*[\w\-\@?^=%&/~\+#])?', ' spamurl ', text)
    # E-mail addresses -> ' email '.
    text = re.sub(r'([\w-]+(\.[\w-]+)*@[\w-]+(\.[\w-]+)+)', ' email ', text)
    # Phone-number-like sequences -> ' phone '.
    text = re.sub(r'[\@\+\*].?[014789][0-9\+\-\.\~\(\) ]+.{6,}', ' phone ', text)
    # Remaining digit/percent runs -> ' digit '.
    text = re.sub(r'[0-9\.\%]+', ' digit ', text)
    return text

def to_lowercase(words):
    """Return a new list with every token of *words* lowercased."""
    return [word.lower() for word in words]

def lemmatize_verbs(words):
    """Lemmatize every token as a verb (pos='v'), e.g. 'running' -> 'run'."""
    wnl = WordNetLemmatizer()
    return [wnl.lemmatize(word, pos='v') for word in words]

def denoise_text(text):
    """Run the full cleaning pipeline over one raw document.

    Lowercase, strip HTML, normalize punctuation and non-ASCII tokens,
    tokenize, lowercase tokens, and lemmatize verbs.  Returns a single
    space when nothing survives, so callers never see an empty string.
    """
    cleaned = remove_non_ascii(remove_punctuation(strip_html(text.lower())))
    tokens = lemmatize_verbs(to_lowercase(word_tokenize(cleaned)))
    result = " ".join(tokens)
    # Guard against an all-whitespace result.
    return result if result.strip() else " "

def load_dataset(fpath):
    """Load the community CSV and return ``[[community, message], ...]``.

    ``message`` is body + ' ' + title, truncated to 1000 characters;
    only communities with more than 1000 posts are kept.

    Fixes two defects in the original:
    * ``error_bad_lines`` was removed in pandas 2.0 —
      ``on_bad_lines='skip'`` is the supported equivalent.
    * ``df[['body', 'title']].fillna(..., inplace=True)`` operated on a
      temporary copy and silently did nothing; the columns are now
      filled individually on the frame itself.
    """
    df = pd.read_csv(fpath, sep=",", encoding='latin-1',
                     low_memory=False, on_bad_lines='skip')
    df = df[['community', 'title', 'body']]
    # Fill per-column: fillna on a sliced copy would not touch `df`.
    df['body'] = df['body'].fillna('').map(str)
    df['title'] = df['title'].fillna('').map(str)
    df['message'] = df['body'] + ' ' + df['title']
    df = df.dropna()
    # Cap message length so downstream tokenization stays cheap.
    df['message'] = df['message'].str.slice(0, 1000)
    # Keep only communities with enough examples to train on.
    df = df.groupby("community").filter(lambda x: len(x) > 1000)
    return df[["community", "message"]].values.tolist()

def text_to_id(all_texts, words, categories):
    """Map each ``[category, text]`` pair to id form and return the
    formatted lines ``'cat_id\\tword_id word_id ...\\n'``.

    Word ids start at 1; out-of-vocabulary words fall back to id 0 via
    the defaultdict.  NOTE: ``all_texts`` is rewritten in place, exactly
    as in the original implementation.
    """
    word_ids = defaultdict(int)
    word_ids.update({w: i for i, w in enumerate(words, start=1)})
    cat_ids = {c: i for i, c in enumerate(categories)}
    for idx, (cat, text) in enumerate(all_texts):
        all_texts[idx][0] = str(cat_ids[cat])
        all_texts[idx][1] = ' '.join(str(word_ids[w]) for w in text.split())
    return ["%s\t%s\n" % (c, w) for c, w in all_texts]


if __name__ == '__main__':
    # Load [community, message] rows from the raw export.
    all_texts = load_dataset("../data/en_community_content.csv")

    # Clean one slice of rows; rows whose cleaned text is empty are
    # dropped, and the community label is lowercased.
    # NOTE(review): `task` is defined inside the __main__ guard — this
    # works with the 'fork' start method, but under 'spawn'
    # (Windows / recent macOS defaults) the child processes cannot
    # resolve `task` by name; confirm the target platform.
    def task(texts):
        res = []
        for line in texts:
            t = denoise_text(line[1])
            if len(t.split()) > 0:
                res.append([line[0].lower(), t])
        return res
    core_num = multiprocessing.cpu_count()
    # core_num+1 boundaries -> core_num contiguous, near-equal slices.
    slices = np.linspace(0, len(all_texts), core_num+1).astype(int)

    # Fan the slices out to a worker pool, one async task per core.
    pool = Pool(core_num)
    temp = []
    for i in range(core_num):
        slice_texts = all_texts[slices[i]:slices[i+1]]
        temp.append(pool.apply_async(task, args=(slice_texts,)))
    pool.close()
    pool.join()

    # Collect results in submission order, so overall row order is kept.
    result = []
    for t in temp:
        result.extend(t.get())

    # write to disk
    all_texts = ["%s\t%s\n" % (t[0], t[1]) for t in result]
    with open("../data/en_community_content_clean.csv", "w") as fw:
        fw.writelines(all_texts)

    # split for training svm, rf, nb, tree-based models
    # NOTE(review): 90/10 split without shuffling — rows keep their
    # original file order; confirm that is intended for the split.
    train = all_texts[:int(len(all_texts)*0.9)]
    test = all_texts[int(len(all_texts)*0.9):]
    with open("../data/train.csv", "w") as fw:
        fw.writelines(train)
    with open("../data/test.csv", "w") as fw:
        fw.writelines(test)

    # category dict
    counter = Counter()
    counter.update([x[0] for x in result])
    # keys() is a live view of THIS Counter object; rebinding the name
    # `counter` below does not change what `categories` iterates.
    categories = counter.keys()
    categories_p = ["%s,%d\n" % (x[0], x[1]) for x in counter.items()]
    with open("../data/label.csv", "w") as fw:
        fw.writelines(categories_p)

    # build word dict
    counter = Counter()
    for x in result:
        counter.update(x[1].split())
    # Vocabulary keeps only words seen more than once; words.csv below
    # still records every word with its count (including singletons).
    words = [k for k in counter.keys() if counter.get(k) > 1]
    words_p = ["%s,%d\n" % (x[0], x[1]) for x in counter.items()]
    with open("../data/words.csv", "w") as fw:
        fw.writelines(words_p)

    # Convert labels and words to integer ids and persist the id file.
    # (text_to_id also mutates `result` in place.)
    all_texts = text_to_id(result, words, categories)
    with open("../data/en_community_content_clean_id.csv", "w") as fw:
        fw.writelines(all_texts)
