import pandas as pd
import numpy as np
import nltk
import scipy.stats as sps
from tqdm import tqdm
from nltk.stem import WordNetLemmatizer
import string
import re
from nltk.stem import SnowballStemmer
from snownlp import SnowNLP
import jieba


# Fixed seed for reproducibility of any downstream random operations.
seed = 1024
np.random.seed(seed)

path = '../data/'

# Pre-pickled splits produced by an upstream step.
train = pd.read_pickle(path + 'train.pkl')
valid = pd.read_pickle(path + 'valid.pkl')
dev = pd.read_pickle(path + 'dev.pkl')

# BUG FIX: keep data_all as a DataFrame. The original appended `.values`
# (an ndarray), but the code below calls `data_all.values`, assigns
# `data_all['context'] = ...`, and slices with `.iloc` -- none of which
# work on a plain ndarray. Positional `.iloc` slicing later means the
# (possibly duplicated) index from concat is harmless.
data_all = pd.concat([train, valid, dev])


#clean the text
# Plain-substring replacement map applied in insertion order by
# preprocessing(). Keys must be lowercase because the input is lowered
# before replacement. Groups: contractions first, then common
# misspellings / domain-specific normalizations.
abbr_dict = {
    # wh-word contractions
    "what's": "what is",
    "what're": "what are",
    "who's": "who is",
    "who're": "who are",
    "where's": "where is",
    "where're": "where are",
    "when's": "when is",
    "when're": "when are",
    "how's": "how is",
    "how're": "how are",
    "why's": "why is",
    "why're": "why are",

    # pronoun + be
    "i'm": "i am",
    "we're": "we are",
    "you're": "you are",
    "they're": "they are",
    "it's": "it is",
    "he's": "he is",
    "she's": "she is",
    "that's": "that is",
    "there's": "there is",
    "there're": "there are",

    # ...'ve -> ... have
    "i've": "i have",
    "we've": "we have",
    "you've": "you have",
    "they've": "they have",
    "who've": "who have",
    "would've": "would have",
    "not've": "not have",

    # ...'ll -> ... will
    "i'll": "i will",
    "we'll": "we will",
    "you'll": "you will",
    "he'll": "he will",
    "she'll": "she will",
    "it'll": "it will",
    "they'll": "they will",

    # negations
    "isn't": "is not",
    "wasn't": "was not",
    "aren't": "are not",
    "weren't": "were not",
    "can't": "can not",
    "couldn't": "could not",
    "don't": "do not",
    "didn't": "did not",
    "shouldn't": "should not",
    "wouldn't": "would not",
    "doesn't": "does not",
    "haven't": "have not",
    "hasn't": "has not",
    "hadn't": "had not",
    "won't": "will not",
    "mustn't": "must not",

    # misspellings and domain-specific normalizations
    "e-mail": "email",
    "imrovement": 'improvement',
    "intial": "initial",
    "motorolla": "motorola",
    "programing": "programming",
    "quikly": "quickly",
    "demonitization": "demonetization",
    "60k": "60000",
    " 9 11 ":"911",
    " u s ":'american',
    "b g":"bg",
    "e g":'eg',
    # NOTE: extend with further replacements as needed.
    # r'[^\x00-\xff]+':'NOENGLISH'
}

def text_to_wordlist(text):
    """Lowercase and normalize an English sentence.

    Expands common contractions, pads selected punctuation with spaces
    (so it survives a later ``split()`` as its own token), drops the
    rest, applies a few domain-specific normalizations, and collapses
    whitespace runs.

    Parameters
    ----------
    text : str
        Raw English text.

    Returns
    -------
    str
        The cleaned text as a single space-separated string
        (note: a string, NOT a list of words).
    """
    # Lowercase and collapse all whitespace runs into single spaces.
    text = " ".join(text.lower().split())
    # Applied strictly in this order: later rules assume earlier ones
    # already ran (e.g. contractions are expanded only after the
    # character filter, which keeps the apostrophe).
    # BUG FIX: the original also ran re.sub(r"\0s", "0", text); `\0` is
    # the regex octal escape for NUL, which the first filter below has
    # already replaced with a space, so that pattern could never match
    # and has been removed.
    substitutions = (
        # keep only letters, digits and a small punctuation set
        (r"[^A-Za-z0-9^,!.\/'+-=]", " "),
        # contraction expansion
        (r"what's", "what is "),
        (r"\'s", " "),
        (r"\'ve", " have "),
        (r"can't", "cannot "),
        (r"n't", " not "),
        (r"i'm", "i am "),
        (r"\'re", " are "),
        (r"\'d", " would "),
        (r"\'ll", " will "),
        # punctuation: drop, or pad with spaces to tokenize separately
        (r",", " "),
        (r"\.", " "),
        (r"!", " ! "),
        (r"\/", " "),
        (r"\^", " ^ "),
        (r"\+", " + "),
        (r"\-", " - "),
        (r"\=", " = "),
        (r"'", " "),
        # domain-specific normalizations
        (r"60k", " 60000 "),
        (r":", " : "),
        (r" e g ", " eg "),
        (r" b g ", " bg "),
        (r" u s ", " american "),
        (r" 9 11 ", "911"),
        (r"e - mail", "email"),
        (r"j k", "jk"),
        # collapse the double spaces introduced above
        (r"\s{2,}", " "),
    )
    for pattern, replacement in substitutions:
        text = re.sub(pattern, replacement, text)
    return text

#split ch sentence
def ch_split_words(x):
    """Segment Chinese text with jieba and join the tokens with spaces."""
    # jieba.cut yields tokens lazily; join consumes the generator directly.
    return ' '.join(jieba.cut(str(x)))

def ch_text_to_wordlist(text):
    """Replace line breaks and full-width Chinese punctuation with spaces."""
    # Each pattern maps to a single space, applied in order.
    for pattern in (r"\r", r"\n", r"，", r"。"):
        text = re.sub(pattern, " ", text)
    return text

def preprocessing(context):
    """Lowercase *context*, expand contractions, and strip punctuation.

    Parameters
    ----------
    context : Any
        Raw text; coerced to ``str`` first.

    Returns
    -------
    str
        Lowercased text with ``abbr_dict`` substring replacements applied
        and ASCII punctuation removed.
    """
    context = str(context).lower()
    # Expand known contractions / normalizations (plain substring replace,
    # in dict insertion order).
    for abbr, expansion in abbr_dict.items():
        context = context.replace(abbr, expansion)
    # BUG FIX: str.translate expects a translation table, not a plain
    # string. Passing string.punctuation directly mapped control chars
    # (ord < 32) to arbitrary punctuation (e.g. '\n' -> '+') and left all
    # other characters untouched. Build a real deletion table instead,
    # matching the evident Python-2 translate(None, punctuation) intent.
    context = context.translate(str.maketrans('', '', string.punctuation))
    return context


# Raw row access; column 3 is the language tag, column 0 the raw text.
# np.asarray works whether data_all is a DataFrame or already an ndarray
# (the original `data_all.values` raised AttributeError on an ndarray).
dd = np.asarray(data_all)
clean_data = []
# Single pass per row (the original made two identical passes over the
# data for no benefit): English rows get contraction expansion +
# regex normalization; other rows get jieba segmentation + CJK cleanup.
for i in tqdm(np.arange(dd.shape[0])):
    if dd[i, 3] == 'en':
        clean_data.append(text_to_wordlist(preprocessing(dd[i, 0])))
    else:
        clean_data.append(ch_text_to_wordlist(ch_split_words(dd[i, 0])))

# Requires data_all to be a DataFrame (list assignment is positional).
data_all['context'] = clean_data

# Split back into the original three partitions by position; concat
# preserved the row order train -> valid -> dev.
train_clean = data_all.iloc[:train.shape[0], :]
valid_clean = data_all.iloc[train.shape[0]:(train.shape[0] + valid.shape[0]), :]
dev_clean = data_all.iloc[(train.shape[0] + valid.shape[0]):, :]

pd.to_pickle(train_clean, path + 'train_clean.pkl')
pd.to_pickle(valid_clean, path + 'valid_clean.pkl')
pd.to_pickle(dev_clean, path + 'dev_clean.pkl')

