#!/usr/bin/env python 
# -*- coding: utf-8 -*-
import os
import re
import warnings

import keras
from sklearn.feature_extraction import text
from tensorflow.python.keras import Model
from tensorflow.python.keras.callbacks import ModelCheckpoint
from tensorflow.python.keras.optimizer_v1 import Adam

warnings.simplefilter(action='ignore', category=FutureWarning)
from nltk.stem import WordNetLemmatizer
import pandas as pd
from gensim.models import Word2Vec
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt


from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import RandomizedSearchCV

from keras.models import Sequential
from keras import layers
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.wrappers.scikit_learn import KerasClassifier
from keras.models import Sequential
from keras.layers import Embedding, Bidirectional, LSTM, Dropout, Dense, Input, Conv2D, Conv1D, MaxPooling1D, \
    concatenate, Flatten

# File-system layout: everything lives under ./data relative to the CWD.
pos_dir = os.path.join('data', 'positiveReviews')  # presumably raw positive-review files — used by an earlier step
neg_dir = os.path.join('data', 'negativeReviews')  # presumably raw negative-review files — used by an earlier step
src_csv = os.path.join('data', 'src.csv')          # combined raw dataset CSV (earlier pipeline stage)
cln_csv = os.path.join('data', 'cln.csv')          # cleaned dataset CSV; this script reads it below
w2v_mdl = os.path.join('data', 'w2v.mdl')          # target path for a saved Word2Vec model (not written in this chunk)


# Resume from the cleaned CSV produced by the data-cleaning step.
# Expected columns: 'sen' (space-separated tokens) and 'tag' (binary label).
df = pd.read_csv(cln_csv, index_col=0)
sentences = df.sen.str.split()

# Train word vectors on the tokenized sentences.
# NOTE: `size=` and `wv.vocab`/`wv.index2word` are gensim<4 APIs
# (gensim>=4 renamed them to `vector_size=` and `wv.key_to_index`/`wv.index_to_key`).
embedding_vector_size = 128
w2v_model = Word2Vec(
    sentences=sentences,
    size=embedding_vector_size,
    min_count=3, window=5, workers=4)

# All words kept by Word2Vec (~33531 entries), in embedding-row order.
# BUGFIX: previously this enumerated `wv.vocab.keys()`, whose dict order is
# the corpus scan order — NOT the frequency-sorted row order of `wv.vectors`.
# `wv.index2word` is the authoritative row order, so `word_index[word]` now
# matches the row of that word's vector in the Embedding weight matrix.
vocab_list = list(w2v_model.wv.index2word)
# Map each word to its row index in the embedding matrix.
word_index = {word: index for index, word in enumerate(vocab_list)}
# 序列化
def get_index(sentence, index=None):
    """Convert a tokenized sentence into a list of vocabulary indices.

    Words absent from the mapping (e.g. low-frequency words dropped by
    Word2Vec's ``min_count``) are silently skipped; mapping them to a
    dedicated UNK index might yield better results.

    Args:
        sentence: iterable of word tokens.
        index: optional word -> int mapping; defaults to the module-level
            ``word_index`` built from the Word2Vec vocabulary, preserving
            the original call signature.

    Returns:
        List of int indices, in sentence order, with unknown words removed.
    """
    # Default to the module-level mapping; no `global` statement is needed
    # for read-only access.
    if index is None:
        index = word_index
    return [index[word] for word in sentence if word in index]

# Turn every tokenized sentence into its sequence of vocabulary indices.
X_data = [get_index(sentence) for sentence in sentences]

# Pad short sequences / truncate long ones to a fixed length.
# maxlen was chosen from the median and mean sentence lengths of the corpus.
maxlen = 100
X_pad = pad_sequences(X_data, maxlen=maxlen)

# Binary sentiment labels.
Y = df.tag.values

# Hold out 20% of the data for validation; fixed seed for reproducibility.
X_train, X_test, Y_train, Y_test = train_test_split(
    X_pad,
    Y,
    test_size=0.2,
    random_state=42)

# Reuse the trained Word2Vec weight matrix in the Keras Embedding layer.
embedding_matrix = w2v_model.wv.vectors

# Bidirectional LSTM classifier -- ~0.81 accuracy ########################
lstm_size = 128
dense_size = 128

model = Sequential([
    # Embedding layer initialised from the Word2Vec vectors and frozen
    # so training does not overwrite them.
    Embedding(
        input_dim=embedding_matrix.shape[0],
        output_dim=embedding_matrix.shape[1],
        input_length=maxlen,
        weights=[embedding_matrix],
        trainable=False),
    Bidirectional(LSTM(lstm_size, recurrent_dropout=0.1)),
    Dropout(0.25),
    Dense(dense_size, activation='sigmoid'),
    Dropout(0.3),
    # Single sigmoid unit: probability of the positive class.
    Dense(1, activation='sigmoid'),
])

model.compile(
    loss="binary_crossentropy",
    optimizer='adam',
    metrics=['accuracy']
)

history = model.fit(
    x=X_train,
    y=Y_train,
    validation_data=(X_test, Y_test),
    batch_size=50,
    epochs=8
)