#!/usr/bin/env python 
# -*- coding: utf-8 -*-
import os
import re
import warnings

import keras
from sklearn.feature_extraction import text
from tensorflow.python.keras import Model
from tensorflow.python.keras.callbacks import ModelCheckpoint
from tensorflow.python.keras.optimizer_v1 import Adam

warnings.simplefilter(action='ignore', category=FutureWarning)
from nltk.stem import WordNetLemmatizer
import pandas as pd
from gensim.models import Word2Vec
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt

from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import RandomizedSearchCV

from keras.models import Sequential
from keras import layers
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.wrappers.scikit_learn import KerasClassifier
from keras.models import Sequential
from keras.layers import Embedding, Bidirectional, LSTM, Dropout, Dense, Input, Conv2D, Conv1D, MaxPooling1D, \
    concatenate, Flatten

# Locations of every data artifact, all under the local `data` directory.
_DATA_DIR = 'data'
pos_dir = os.path.join(_DATA_DIR, 'positiveReviews')   # raw positive reviews
neg_dir = os.path.join(_DATA_DIR, 'negativeReviews')   # raw negative reviews
src_csv = os.path.join(_DATA_DIR, 'src.csv')           # merged source CSV
cln_csv = os.path.join(_DATA_DIR, 'cln.csv')           # cleaned CSV (input here)
w2v_mdl = os.path.join(_DATA_DIR, 'w2v.mdl')           # saved Word2Vec model


# Resume from the cleaned CSV (picks up where the data-cleaning step left off).
df = pd.read_csv(cln_csv, index_col=0)
sentences = df.sen.str.split()

# Train the word vectors.
# NOTE(review): this is the gensim 3.x API (`size=`, `wv.vocab`); gensim 4+
# renamed `size` to `vector_size` and removed `wv.vocab` — confirm the
# pinned gensim version before upgrading.
from gensim.models import Word2Vec
embedding_vector_size = 128
w2v_model = Word2Vec(
    sentences=sentences,
    size=embedding_vector_size,
    min_count=3, window=5, workers=4)

# Full vocabulary (~33531 words).
vocab_list = list(w2v_model.wv.vocab.keys())
# Word -> embedding-row index.
# FIX: start indices at 1 so index 0 stays reserved for the padding value
# that pad_sequences emits; previously the first vocabulary word shared
# index 0 with padding, conflating its embedding with pad positions.
# (The embedding matrix below is already sized len(word_index) + 1.)
word_index = {word: index for index, word in enumerate(vocab_list, start=1)}
# Serialization: turn tokenized sentences into index sequences.
def get_index(sentence, index=None):
    """Map a tokenized sentence to a list of vocabulary indices.

    Out-of-vocabulary words are silently dropped; mapping low-frequency
    words to a dedicated UNK index might work better.

    Args:
        sentence: iterable of word tokens.
        index: optional word -> index mapping; defaults to the module-level
            ``word_index`` built from the Word2Vec vocabulary.

    Returns:
        list[int]: indices of the in-vocabulary words, in order.
    """
    if index is None:
        index = word_index  # read-only access; no `global` needed
    return [index[word] for word in sentence if word in index]

# Serialize every review into a list of vocabulary indices.
X_data = list(map(get_index, sentences))

# Pad/truncate to a fixed length.
# maxlen chosen from the median and mean review lengths.
maxlen = 100
X_pad = pad_sequences(X_data, maxlen=maxlen)
# Labels.
Y = df.tag.values
# Train/test split (fixed seed for reproducibility).
X_train, X_test, Y_train, Y_test = train_test_split(
    X_pad,
    Y,
    test_size=0.2,
    random_state=42)

# FIX: removed the dead `embedding_matrix = w2v_model.wv.vectors` assignment —
# it was unconditionally overwritten by the zero-initialized rebuild further
# down before any use, and its shape also lacked the extra row that the
# Embedding layer's input_dim sizing assumes.

############### TextCNN, parallel branches  ################################
sequence_length = maxlen  # 100
vocabulary_size = len(word_index)  # 33531
embedding_dim = embedding_vector_size  # 128
filter_sizes = [3, 4, 5]
num_filters = 128  # channels produced per kernel size
drop = 0.5

epochs = 100
batch_size = 50

# Row 0 is left as zeros, and so is any word missing from the pretrained
# vectors (e.g. filtered out by min_count).
embedding_matrix = np.zeros((len(word_index) + 1, embedding_vector_size))
for token, row in word_index.items():
    try:
        embedding_matrix[row] = w2v_model.wv[str(token)]
    except KeyError:
        # token absent from the Word2Vec vocabulary — keep the zero row
        continue


# this returns a tensor
print("Creating Model...")
inputs = Input(shape=(sequence_length,), dtype='int32')
embed = Embedding(input_dim=embedding_matrix.shape[0], output_dim=embedding_dim, input_length=sequence_length)(inputs)
# reshape = Reshape((sequence_length, embedding_dim, 1))(embedding)

conv_0 = Conv1D(num_filters, kernel_size=filter_sizes[0], padding='same', activation='relu')(embed)
conv_0 = MaxPooling1D(pool_size=sequence_length - filter_sizes[0] + 1)(conv_0)
conv_1 = Conv1D(num_filters, kernel_size=filter_sizes[1], padding='same', activation='relu')(embed)
conv_1 = MaxPooling1D(pool_size=sequence_length - filter_sizes[1] + 1)(conv_1)
conv_2 = Conv1D(num_filters, kernel_size=filter_sizes[2], padding='same', activation='relu')(embed)
conv_2 = MaxPooling1D(pool_size=sequence_length - filter_sizes[2] + 1)(conv_2)

cnn = concatenate([conv_0,conv_1,conv_2],axis=-1)
flatten = Flatten()(cnn)
dropout = Dropout(drop)(flatten)
output = Dense(units=3, activation='softmax')(dropout)

# this creates a model that includes
model_cnn2 = Model(inputs=inputs, outputs=output)


# FIX: the targets are one-hot over 3 classes against a softmax output, so
# the matching loss is categorical_crossentropy; binary_crossentropy here
# treated each of the 3 outputs as an independent binary label and reported
# a misleading "accuracy".
model_cnn2.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
print("Training Model...")  # FIX: corrected "Traning" typo
# One-hot encode the integer labels for the softmax/categorical loss.
# NOTE(review): the data comes from positive/negative review folders, so the
# labels are presumably binary — num_classes=3 adds an always-empty class;
# confirm whether 2 classes (or a single sigmoid unit) was intended.
Y_train_one_hot = keras.utils.to_categorical(Y_train, num_classes=3)
Y_test_one_hot = keras.utils.to_categorical(Y_test, num_classes=3)
history_cnn2 = model_cnn2.fit(X_train, Y_train_one_hot,
                              batch_size=batch_size, epochs=epochs,
                              validation_data=(X_test, Y_test_one_hot))
