import numpy as np
from sklearn.model_selection import train_test_split
from keras.models import Model
from keras.layers import Input, Embedding, Conv1D, GlobalMaxPool1D
from keras.layers import Dense, Dropout, BatchNormalization
from keras.layers import GRU, MaxPooling1D, Bidirectional
import pandas as pd
import time, os, codecs, pickle, csv, sys
from resnet import block
from evaluate import predict2both, predict2half, predict2top, f1_avg
from keras.models import load_model
from keras import utils

# Timestamped start banner plus the hyper-parameter configuration for this run.
print('Start', time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()))
print('Accusation training, the parameters as following:')

data_dir = "./data_nanning"     # data_nanning is a soft link to target data dir
num_epochs = 30         # values tried: 30, 60, 100, 200
batch_size = 128        # values tried: 128, 512
embedding_dim = 512     # values tried: 128, 256, 512
hidden_dim = 300        # values tried: 200, 300, 500, 1000
kernel_size = 3         # values tried: 2, 3, 5

# Echo the full configuration so it is captured in the run log.
_params_fmt = "data_dir=%s\tnum_epochs=%d\tbatch_size=%d\tembedding_dim=%d\thidden_dim=%d\tkernel_size=%d"
print(_params_fmt % (data_dir, num_epochs, batch_size, embedding_dim, hidden_dim, kernel_size))

def _load_pickle(name):
    # Deserialize one preprocessing artifact from data_dir.
    # NOTE(review): pickle.load executes arbitrary code — only trusted,
    # locally-produced files should live in data_dir.
    with codecs.open(os.path.join(data_dir, name), mode="rb") as fh:
        return pickle.load(fh)

def _load_split(name):
    # One row per sample: column 0 is the label id, columns 1.. are the
    # padded token-id sequence.
    return pd.read_csv(os.path.join(data_dir, name), sep="\t", header=None, quoting=csv.QUOTE_NONE)

vocabs = _load_pickle("vocab.pkl")      # list of vocabulary tokens
labels = _load_pickle("labels.pkl")     # set of label ids

df_trainset = _load_split("nanning.trainset")
df_testset = _load_split("nanning.testset")
num_words = len(vocabs)
seq_max_len = df_trainset.shape[1] - 1  # all columns except the label column
print('num_words=%d\tseq_max_len=%d' % (num_words, seq_max_len))

# Split each frame into label vector and token-id matrix.
y_train, fact_train = df_trainset.iloc[:, 0].values, df_trainset.iloc[:, 1:].values
y_test, fact_test = df_testset.iloc[:, 0].values, df_testset.iloc[:, 1:].values
# One-hot encode. num_classes is derived from the TRAIN labels for BOTH
# splits so the two matrices share the same width; this assumes no test
# label exceeds max(y_train) — TODO confirm against the preprocessing step.
labels_train = utils.to_categorical(y_train, num_classes=np.max(y_train)+1)
labels_test = utils.to_categorical(y_test, num_classes=np.max(y_train)+1)

print("Train set label_shape={} fact_shape={}".format(labels_train.shape, fact_train.shape))
print("Test set label_shape={} fact_shape={}".format(labels_test.shape, fact_test.shape))

# Network definition: embedding -> two DPCNN-style residual conv blocks
# -> global max pool -> batch norm -> dense classifier head.
inputs = Input(shape=[seq_max_len])
embedded = Embedding(input_dim=num_words,       # num_words + 1 was also considered
                     input_length=seq_max_len,
                     output_dim=embedding_dim,
                     mask_zero=0,               # falsy: padding positions are NOT masked
                     name='Embedding')(inputs)
features = block(x=embedded, filters_num=embedding_dim, kernel_size=kernel_size)
features = block(x=features, filters_num=embedding_dim, kernel_size=kernel_size)
pooled = BatchNormalization()(GlobalMaxPool1D()(features))
hidden = Dropout(0.5)(Dense(hidden_dim, activation="relu")(pooled))   # dropout rate 0.2-0.5 explored
# Output width matches the one-hot label width (max train label id + 1).
outputs = Dense(np.max(y_train)+1, activation="sigmoid")(hidden)
# NOTE(review): sigmoid output paired with categorical_crossentropy is
# unusual for one-hot single-label targets (softmax is the standard
# pairing); kept as-is to preserve the original training behavior.
model = Model(inputs=inputs, outputs=outputs)
model.compile(loss='categorical_crossentropy',    # 'binary_crossentropy' was also considered
              optimizer='adam',
              metrics=['accuracy'])

# Training loop: after every epoch, evaluate on the test set with three
# prediction-thresholding strategies and checkpoint the best model.
score_list1 = []    # per-epoch exact-match accuracies [epoch, top, half, both]
score_list2 = []    # per-epoch averaged F1 scores    [epoch, top, half, both]
best_acc = 0
best_f1 = 0
best_epoch = 0
model_path = './model/%d_%d/accusation' % (num_words, seq_max_len)
# BUGFIX: os.mkdir raises FileNotFoundError when the intermediate
# './model/%d_%d' directory does not exist yet; makedirs creates the whole
# chain and exist_ok=True makes re-runs safe.
os.makedirs(model_path, exist_ok=True)

for epoch in range(num_epochs):
    model.fit(x=fact_train, y=labels_train, batch_size=batch_size, epochs=1, verbose=1)

    y = model.predict(fact_test)
    y1 = predict2top(y)     # keep only the highest-confidence label
    y2 = predict2half(y)    # keep labels with confidence > 0.5
    y3 = predict2both(y)    # combination of the two strategies above

    # Exact-match accuracy: a sample counts as correct only when every
    # label position equals the ground truth ((a == b).min() is True iff
    # the whole one-hot rows match). Hoisted out of the prints so each
    # ratio is computed once instead of three times.
    n = len(y1)
    acc_top = sum((labels_test[i] == y1[i]).min() for i in range(n)) / n
    acc_half = sum((labels_test[i] == y2[i]).min() for i in range(n)) / n
    acc_both = sum((labels_test[i] == y3[i]).min() for i in range(n)) / n
    print("Epoch %s\taccu: %.6f\t%.6f\t%.6f" % (epoch, acc_top, acc_half, acc_both))

    # Averaged F1 for the same three strategies.
    f1_top = f1_avg(y_pred=y1, y_true=labels_test)
    f1_half = f1_avg(y_pred=y2, y_true=labels_test)
    f1_both = f1_avg(y_pred=y3, y_true=labels_test)
    print("Epoch %s\tf1: %.6f\t%.6f\t%.6f" % (epoch, f1_top, f1_half, f1_both))

    score_list1.append([epoch, acc_top, acc_half, acc_both])
    score_list2.append([epoch, f1_top, f1_half, f1_both])

    # Save the best model. NOTE(review): the checkpoint requires BOTH the
    # top-1 accuracy AND the top-1 F1 to improve simultaneously (original
    # behavior, kept); an epoch that improves only one metric is skipped.
    if (best_acc < acc_top) and (best_f1 < f1_top):
        best_acc, best_f1, best_epoch = acc_top, f1_top, epoch
        model.save(os.path.join(model_path, "DPCNN_epochs_%d.h5" % best_epoch))

# Persist the per-epoch metric histories next to the checkpoints so the
# training curve can be inspected later.
for scores, fname in ((score_list1, "score_list1.csv"), (score_list2, "score_list2.csv")):
    pd.DataFrame(scores).to_csv(os.path.join(model_path, fname), encoding="utf-8")

print('End\t', time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()))
# nohup python model_DPCNN.py 2>&1 &
