#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Data preparation for IMDB-style sentiment reviews.

Merges per-review text files into a TSV, cleans the text, and computes
simple corpus statistics for downstream word2vec / keras modelling.
"""
import os
import re
import warnings

# Silence FutureWarnings emitted by sklearn/keras at import time.
warnings.simplefilter(action='ignore', category=FutureWarning)

from nltk.stem import WordNetLemmatizer
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
from gensim.models import Word2Vec

plt.style.use('ggplot')

from sklearn.feature_extraction import text
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import RandomizedSearchCV, train_test_split
from sklearn.preprocessing import LabelEncoder, OneHotEncoder

from keras.models import Sequential
from keras import layers
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.wrappers.scikit_learn import KerasClassifier
# NOTE(review): removed duplicated imports (pandas, pad_sequences) and the
# broken line `from sklearn.model_selection import train_test_splitplt`
# (a typo fusing `train_test_split` with `plt`) which raised ImportError.

# File-system layout: raw review folders and intermediate artifacts.
pos_dir = os.path.join('data', 'positiveReviews')  # one .txt file per positive review
neg_dir = os.path.join('data', 'negativeReviews')  # one .txt file per negative review
src_csv = os.path.join('data', 'src.csv')  # merged raw TSV ("src\ttag")
cln_csv = os.path.join('data', 'cln.csv')  # cleaned reviews output
w2v_mdl = os.path.join('data', 'w2v.mdl')  # word2vec model path (not used in this chunk)


ENCODEING = 'utf-8'  # NOTE(review): misspelling of ENCODING; kept — sibling functions reference it
NEW_LINE = ' '  # replacement text for <br /> HTML tags
PUNCTUATION_MARK = ' '  # replacement text for stripped punctuation
STOP_WORD = ''  # replacement text for stop words (appears unused in this chunk)
stop_words = set(text.ENGLISH_STOP_WORDS)  # sklearn's built-in English stop-word list
# Punctuation to strip; deliberately leaves "-" and the apostrophe untouched
# (also leaves "[", "]" and "`", which the pattern does not list).
rex = re.compile(r'[!"#$%&\()*+,./:;<=>?@\\^_{|}~]+')
lemmatizer = WordNetLemmatizer()  # requires the NLTK WordNet corpus to be available


def build_data(pos_dir: str, neg_dir: str, out_path: str, encoding: str = 'utf-8'):
    """Merge per-review .txt files into a single tab-separated file.

    Each review is expected to be a single line of text.  The output file
    gets a "src\ttag" header, then one row per review: the review text
    (tabs removed so the TSV stays well-formed) followed by the label —
    '1' for positive, '0' for negative.

    Args:
        pos_dir: directory of positive-review .txt files.
        neg_dir: directory of negative-review .txt files.
        out_path: destination TSV path (overwritten).
        encoding: text encoding for both reads and the write
            (default 'utf-8', matching the module-level ENCODEING).
    """
    def _append_reviews(wf, src_dir: str, tag: str):
        # Write one "<review>\t<tag>" row per .txt file in src_dir.
        for file_name in os.listdir(src_dir):
            if not file_name.endswith(".txt"):
                continue
            file_path = os.path.join(src_dir, file_name)
            with open(file_path, 'r', encoding=encoding) as f:
                # BUG FIX: strip the trailing newline readline() may keep,
                # otherwise the row is split in two; tabs inside the review
                # would also corrupt the TSV, so drop them.
                line = f.readline().rstrip('\r\n').replace('\t', '')
                wf.write(line + '\t' + tag + '\n')

    with open(out_path, 'w', encoding=encoding) as wf:
        wf.write("src\ttag\n")
        _append_reviews(wf, pos_dir, '1')  # positive reviews
        _append_reviews(wf, neg_dir, '0')  # negative reviews

def clean_review(raw_review: str) -> str:
    """Normalize one raw review into a cleaned, space-joined token string.

    Pipeline: strip <br /> tags, drop punctuation (keeping "-" and the
    apostrophe, per the module-level `rex` pattern), lowercase, split on
    whitespace, lemmatize, and remove English stop words.

    Args:
        raw_review: the raw scraped review text.

    Returns:
        The cleaned tokens joined by single spaces.
    """
    # 1. Reviews were scraped, so strip the <br /> HTML line breaks.
    review_text = raw_review.replace("<br />", '')
    # 2. Replace punctuation with spaces (see `rex` for the kept chars).
    review_text = rex.sub(' ', review_text)
    # 3. Lowercase everything.
    review_text = review_text.lower()
    # 4. Tokenize on whitespace.
    word_list = review_text.split()
    # 5. Lemmatize twice: first as nouns (WordNet default), then as verbs,
    #    so both "cats"->"cat" and "running"->"run" are handled.
    tokens = [lemmatizer.lemmatize(word) for word in word_list]
    lemmatized_tokens = [lemmatizer.lemmatize(word, "v") for word in tokens]
    # 6. Drop stop words (idiom fix: `x not in`, not `not x in`).
    meaningful_words = [w for w in lemmatized_tokens if w not in stop_words]
    return ' '.join(meaningful_words)



def clean_data(in_csv_path: str, out_csv_path: str):
    """Cleaning pass over the TSV produced by build_data.

    Reads a tab-separated file with columns ``src`` (raw review) and
    ``tag``, derives a cleaned ``txt`` column and a ``word_count`` column,
    drops ``src`` and writes the result as CSV to ``out_csv_path``.

    Stop words are deliberately NOT removed here: word2vec relies on
    context, and stop words are part of that context.  Stemming and
    lemmatization are also skipped for now — they mangled words.
    """
    train = pd.read_csv(in_csv_path, sep='\t')
    train['txt'] = train['src']
    # Strip the scraped <br /> HTML tags.
    train['txt'] = train['txt'].apply(lambda s: s.replace('<br />', NEW_LINE))
    # Lowercase every whitespace-separated token.
    train['txt'] = train['txt'].apply(lambda sen: " ".join(x.lower() for x in sen.split()))
    # Remove punctuation.  regex=True is passed explicitly because newer
    # pandas versions default Series.str.replace to literal matching.
    train['txt'] = train['txt'].str.replace(r'[^\w\s]+', PUNCTUATION_MARK, regex=True)

    # Word count per review — used later to choose a truncation length.
    train['word_count'] = train['txt'].apply(lambda x: len(str(x).split()))
    # BUG FIX: DataFrame.drop returns a new frame; the original discarded
    # the result, so 'src' was never actually dropped from the output.
    train = train.drop(columns=['src'])
    train.to_csv(out_csv_path)



# One-off preprocessing steps — run once, then keep commented out:
# build_data(pos_dir,neg_dir,src_csv)
# clean_data(src_csv, cln_csv)

# Load the merged TSV, clean every review, and cache the result for later.
data = pd.read_csv(src_csv, sep='\t')
data['sen'] = data['src'].apply(clean_review)
data.to_csv(cln_csv)  # persist the cleaned reviews for downstream steps
sentences = data['sen'].values

# Quick length statistics over the cleaned reviews.
# NOTE(review): len() of a string counts CHARACTERS, not words — confirm
# whether a word count was intended here.
cal_len = pd.DataFrame({'review_lenght': [len(sen) for sen in sentences]})
print("中位数：", cal_len['review_lenght'].median())
print("均值数：", cal_len['review_lenght'].mean())
del cal_len  # free the temporary stats frame

