#-*-coding:utf-8-*-
import jieba
import pandas as pd
import re
import string
import codecs


# Load the labelled training set; the CSV has no header row, so supply
# the column names directly at read time.
train_data = pd.read_csv('../data/training.csv', header=None,
                         names=['label', 'description'])
train_data_processed = train_data.copy()

# Load the unlabelled test set (same headerless layout).
test_data = pd.read_csv('../data/testing.csv', header=None,
                        names=['id', 'description'])
test_data_processed = test_data.copy()

# Chinese (fullwidth) punctuation to strip from segmented text.
punc = "、！？｡。＂＃＄％＆＇（（）＊＋，－／：；＜＝〓\\‖―＞＠［＼］＾＿｀｛｜｝～｟｠｢｣､〃《》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘’‛“”„‟…‧﹏."
# Append the ASCII punctuation set as well.
punc += string.punctuation
# 获取停用词
def getStopwords():
    """Read the stopword file and return one stripped stopword per line."""
    with open("../data/stopwords.txt", encoding='utf8') as f:
        # Iterate the file object directly; each line is stripped of the
        # trailing newline (and surrounding whitespace).
        return [line.strip() for line in f]


stopwords = getStopwords()

def seg_without_punctuation(x):
    """Segment a Chinese text string into space-separated tokens.

    Pipeline: jieba word segmentation -> drop stopwords and pure-digit
    tokens -> strip punctuation (module-level `punc`) -> collapse runs of
    spaces -> re-filter stopwords/digits (removing punctuation can expose
    new digit-only tokens, e.g. "3%" -> "3").

    :param x: raw text string to process
    :return: cleaned, space-joined token string
    """
    # List membership is O(n) per token; build a set once per call for
    # O(1) lookups. (stopwords is a module-level list.)
    stopword_set = set(stopwords)
    # Segment and apply the first stopword/digit filter.
    words = [w for w in jieba.cut(x)
             if w not in stopword_set and not w.isdigit()]
    # BUG FIX: `punc` must be escaped before being placed inside a regex
    # character class — string.punctuation contains ']', '\', '^' and '-',
    # which otherwise create escapes/ranges (the literal backslash in punc
    # was silently consumed as an escape and never matched).
    result = re.sub("[%s]+" % re.escape(punc), ' ', ' '.join(words))
    # Collapse consecutive spaces into one.
    result = re.sub(' +', ' ', result)
    # Second pass: punctuation removal may have produced fresh digit-only
    # or stopword tokens.
    result = ' '.join(w for w in result.split(' ')
                      if w not in stopword_set and not w.isdigit()).strip()
    return result

# Clean both datasets and persist them; the two pipelines are independent,
# so process each frame and write it out in a single pass.
for frame, out_path in ((train_data_processed, '../data/train_processed.csv'),
                        (test_data_processed, '../data/test_processed.csv')):
    frame['description'] = frame['description'].map(seg_without_punctuation)
    frame.to_csv(out_path, index=False, encoding='utf-8')


