import os
import pickle
import tensorflow as tf
import random
import pandas as pd
import time
from utils import file_util
from gensim.models import word2vec
from utils import word2vec_util
import numpy as np
from sklearn.metrics import precision_recall_fscore_support
import tensorflow.contrib.keras as kr


# Load the pickled corpus and labels from disk.
def getContentList(rootDir):
    """Load every pickled content list found under *rootDir*.

    Walks the directory tree, unpickles each file into a flat list of
    contents, and also loads the label list stored in
    'data/label_one_hots.pickle'.

    Returns:
        (content_list, label_list) tuple.
    """
    content_list = []
    for dir_path, _sub_dirs, file_names in os.walk(rootDir):
        for file_name in file_names:
            with open(os.path.join(dir_path, file_name), 'rb') as pkl_file:
                content_list.extend(pickle.load(pkl_file))
    with open('data/label_one_hots.pickle', 'rb') as pkl_file:
        label_list = pickle.load(pkl_file)
    return content_list, label_list


def readyData(content_codes, label_codes):
    """Split the encoded corpus into train and test sets.

    Returns:
        (train_x, train_y, test_x, test_y, max_word_size, class_num) where
        max_word_size is the largest word index found in the corpus and
        class_num is the number of distinct label codes.
    """
    t_start = time.time()
    from sklearn.model_selection import train_test_split
    # Largest token index over the whole corpus (used to size the embedding).
    max_word_size = np.array(content_codes).max(axis=None)
    train_x, test_x, train_y, test_y = train_test_split(content_codes, label_codes)
    print('train_test_split used time : %.2f seconds' % (time.time() - t_start))
    class_num = np.unique(label_codes).shape[0]
    return train_x, train_y,  test_x, test_y, max_word_size, class_num

# Build the text-CNN graph and train it.
def createCNN(train_x, train_y, test_x, test_y, max_word_size, class_num):
    """Build a 1-D convolutional text classifier (TF 1.x graph mode) and train it.

    Args:
        train_x/test_x: sequences of word-index vectors (length 300 each).
        train_y/test_y: integer label codes.
        max_word_size: largest word index (embedding table is sized +1).
        class_num: number of target classes.

    Returns:
        (session, predict_Y, X_holder) so callers can run inference.
    """
    sequence_length = 300            # fixed number of word indices per sample
    embedding_size = 64              # word-embedding dimension
    num_filters = 256                # number of convolution filters
    filter_size = 5                  # convolution kernel width
    num_fc_units = 128               # fully-connected layer units
    dropout_keep_probability = 0.5   # dropout keep ratio
    learning_rate = 1e-3

    # Reset the default graph so repeated calls don't accumulate variables.
    tf.reset_default_graph()
    # Placeholders fed with each training/eval batch.
    X_holder = tf.placeholder(tf.int32, [None, sequence_length], name="X_holder")
    Y_holder = tf.placeholder(tf.float32, [None, class_num])
    # +1 because word indices run from 0 to max_word_size inclusive.
    embedding = tf.get_variable('embedding', [(max_word_size+1), embedding_size])
    embedding_inputs = tf.nn.embedding_lookup(embedding, X_holder, name="embedding_inputs")
    conv = tf.layers.conv1d(embedding_inputs, num_filters, filter_size)
    # Global max-pooling over the time axis.
    max_pooling = tf.reduce_max(conv, [1])
    full_connect = tf.layers.dense(max_pooling, num_fc_units)
    full_connect_dropout = tf.contrib.layers.dropout(full_connect, keep_prob=dropout_keep_probability)
    full_connect_activate = tf.nn.relu(full_connect_dropout)
    softmax_before = tf.layers.dense(full_connect_activate, class_num)
    predict_Y = tf.nn.softmax(softmax_before, name="predict_Y")
    # Loss is computed on the pre-softmax logits for numerical stability.
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(labels=Y_holder, logits=softmax_before)
    loss = tf.reduce_mean(cross_entropy)
    optimizer = tf.train.AdamOptimizer(learning_rate)
    adam = optimizer.minimize(loss)
    isCorrect = tf.equal(tf.argmax(Y_holder, 1), tf.argmax(predict_Y, 1))
    accuracy = tf.reduce_mean(tf.cast(isCorrect, tf.float32))
    # Initialize variables.
    init = tf.global_variables_initializer()
    session = tf.Session()
    # Saver keeps only the most recent checkpoint.
    saver = tf.train.Saver(max_to_keep=1)
    tf.add_to_collection('pred_network', predict_Y)
    session.run(init)
    # Training loop.
    step_n = 100
    batch_size = 64            # samples per training batch
    best_acc_val = 0.94        # only checkpoint models beating this accuracy
    # One-hot encode the integer label codes via keras utils.
    train_Y = kr.utils.to_categorical(train_y, class_num)
    test_Y = kr.utils.to_categorical(test_y, class_num)
    for i in range(step_n):
        selected_index = random.sample(list(range(len(train_y))), k=batch_size)
        batch_X = [train_x[j] for j in selected_index]
        batch_Y = [train_Y[j] for j in selected_index]
        session.run(adam, {X_holder: batch_X, Y_holder: batch_Y})
        step = i + 1
        if step % 10 == 0:
            # Evaluate on a random test batch every 10 steps.
            selected_index2 = random.sample(list(range(len(test_y))), k=batch_size)
            test_batch_X = [test_x[j] for j in selected_index2]
            test_batch_Y = [test_Y[j] for j in selected_index2]
            loss_value, accuracy_value = session.run([loss, accuracy], {X_holder: test_batch_X, Y_holder: test_batch_Y})
            print('step:%d loss:%.4f accuracy:%.4f' % (step, loss_value, accuracy_value))
            if accuracy_value > best_acc_val:
                # Bug fix: remember the new best accuracy so a later, worse
                # batch cannot overwrite the best checkpoint, and tag the
                # checkpoint with the current step (was global_step=step_n,
                # which mislabeled every best-model file as step 100).
                best_acc_val = accuracy_value
                saver.save(session, 'save/best-model', global_step=step)
    # Final checkpoint; graph is saved as 'my-model-{global_step}.meta'.
    saver.save(session, 'save/my-model', global_step=step_n)
    return session, predict_Y, X_holder


def predictAll(test_X, session, predict_Y, X_holder , batch_size=100):
    """Run *predict_Y* over *test_X* in mini-batches and stack the results.

    Batching keeps each feed_dict small enough to evaluate large test sets
    without exhausting memory.
    """
    predictions = []
    total = len(test_X)
    start = 0
    while start < total:
        batch = test_X[start: start + batch_size]
        predictions.extend(session.run(predict_Y, {X_holder: batch}))
        start += batch_size
    return np.array(predictions)


def eval_model(y_true, y_pred, labels):
    """Build a per-class and overall precision/recall/F1 report.

    Args:
        y_true: ground-truth label names.
        y_pred: predicted label names.
        labels: ordered label names used as the report's Label column.

    Returns:
        DataFrame with one row per label plus an overall ('总体') row at
        index -1; columns are Label, Precision, Recall, F1, Support.
    """
    # Per-class Precision, Recall, F1 and support.
    # Bug fix: pass labels= explicitly — without it sklearn only returns
    # scores for classes present in y_true/y_pred, which can mis-align
    # (or length-mismatch) with the `labels` column below when a class is
    # missing from the predictions.
    p, r, f1, s = precision_recall_fscore_support(y_true, y_pred, labels=labels)
    # Support-weighted overall averages.
    tot_p = np.average(p, weights=s)
    tot_r = np.average(r, weights=s)
    tot_f1 = np.average(f1, weights=s)
    tot_s = np.sum(s)
    res1 = pd.DataFrame({
        u'Label': labels,
        u'Precision': p,
        u'Recall': r,
        u'F1': f1,
        u'Support': s
    })
    res2 = pd.DataFrame({
        u'Label': ['总体'],
        u'Precision': [tot_p],
        u'Recall': [tot_r],
        u'F1': [tot_f1],
        u'Support': [tot_s]
    })
    res2.index = [-1]
    res = pd.concat([res1, res2])
    return res[['Label', 'Precision', 'Recall', 'F1', 'Support']]

# Generate and print the classification report.
def generateReport(test_x, test_y, labelEncoder, session, predict_Y, X_holder):
    """Predict on the test set, decode the labels, and print the metrics table."""
    true_labels = labelEncoder.inverse_transform(test_y)
    probabilities = predictAll(test_x, session, predict_Y, X_holder)
    # Most probable class index per sample.
    predicted_codes = np.argmax(probabilities, axis=1)
    predicted_labels = labelEncoder.inverse_transform(predicted_codes)
    report = eval_model(true_labels, predicted_labels, labelEncoder.classes_)
    print('res', report)

# Batched inference for validation.
# NOTE(review): this re-defines predictAll from earlier in the file with
# identical behavior; being defined later, this is the version Python keeps.
# Consider removing one of the two copies.
def predictAll(test_x, session, predict_Y, X_holder, batch_size=100):
    """Evaluate *predict_Y* on *test_x* in slices of *batch_size* rows."""
    collected = []
    for offset in range(0, len(test_x), batch_size):
        chunk = test_x[offset: offset + batch_size]
        collected.extend(session.run(predict_Y, {X_holder: chunk}))
    return np.array(collected)


def main():
    """End-to-end pipeline: load data, encode, train the CNN, report metrics."""
    content_list, label_list = getContentList("content_list")
    # Word2vec model maps tokens to integer indices; sequences are length 300.
    w2v_model = word2vec.Word2Vec.load('./resources/w2vec.model')
    content_codes = word2vec_util.word2indexMat(w2v_model, content_list, 300)
    label_codes, labelEncoder = file_util.label_encodeingEx(label_list)
    (train_x, train_y, test_x, test_y,
     max_word_size, class_num) = readyData(content_codes, label_codes)
    # Build and train the convolutional classifier.
    session, predict_Y, X_holder = createCNN(
        train_x, train_y, test_x, test_y, max_word_size, class_num)
    # Print the evaluation report.
    generateReport(test_x, test_y, labelEncoder, session, predict_Y, X_holder)


# Script entry point: run the full train-and-evaluate pipeline.
if __name__ == '__main__':
    main()