import pandas as pd
from nltk.corpus import stopwords
import numpy as np
stop_words = stopwords.words('english')
import warnings
warnings.filterwarnings(action='ignore')


path = "../data/"

# Load the three pre-cleaned splits and stack them so that features are
# computed over a single combined frame (split back apart further below).
train = pd.read_pickle(path + "train_clean.pkl")
valid = pd.read_pickle(path + "valid_clean.pkl")
dev = pd.read_pickle(path + 'dev_clean.pkl')

data_all = pd.concat([train, valid, dev])
# drop=True discards the old per-split indices instead of keeping them as a
# column. Pass real booleans: the original `inplace=1, drop=1` relied on
# implicit truthiness, which newer pandas versions reject/deprecate.
data_all.reset_index(inplace=True, drop=True)


def generate_stats_len_feature(x, mode='max'):
    """Return a statistic over the lengths of whitespace-separated tokens in x.

    Parameters
    ----------
    x : any
        Value to analyze; it is coerced with str() before tokenizing.
    mode : str, default 'max'
        'max' -> longest token length, 'mean' -> average token length,
        any other value -> shortest token length (matches original behavior).

    Returns
    -------
    int or float
        The requested statistic, or 0 when x contains no tokens
        (np.max on an empty list would raise ValueError otherwise).
    """
    word_lengths = [len(w) for w in str(x).strip().split()]
    # Guard the empty/whitespace-only case: np.max([]) raises and
    # np.mean([]) yields nan; 0 is a safe, well-defined feature value.
    if not word_lengths:
        return 0
    if mode == 'max':
        return np.max(word_lengths)
    elif mode == 'mean':
        return np.mean(word_lengths)
    else:
        return np.min(word_lengths)


# Assemble simple length-based features over the combined context column.
# Dict insertion order fixes the column order of the resulting matrix.
feature_frame = pd.DataFrame({
    'len_sen': data_all.context.apply(lambda c: len(str(c))),
    'len_word': data_all.context.apply(lambda c: len(str(c).strip().split())),
    'len_max_word': data_all.context.apply(lambda c: generate_stats_len_feature(c, 'max')),
    'len_min_word': data_all.context.apply(lambda c: generate_stats_len_feature(c, 'min')),
    'len_mean_word': data_all.context.apply(lambda c: generate_stats_len_feature(c, 'mean')),
})

feature_matrix = feature_frame.values

# Slice the stacked matrix back into the original three splits, in the
# same order they were concatenated (train, valid, dev).
n_train = train.shape[0]
n_valid = valid.shape[0]
train_fea = feature_matrix[:n_train]
valid_fea = feature_matrix[n_train:n_train + n_valid]
dev_fea = feature_matrix[n_train + n_valid:]

pd.to_pickle(train_fea, path + 'train_basic.pkl')
pd.to_pickle(valid_fea, path + 'valid_basic.pkl')
pd.to_pickle(dev_fea, path + 'dev_basic.pkl')