##train.py
'''
Imports: keras-bert model loading/tokenization, Keras layers and callbacks,
and data utilities (pandas, numpy, scikit-learn metrics).
'''
import os
import json
from keras_bert import load_trained_model_from_checkpoint, Tokenizer
from keras.layers import Input, Dense, Conv1D, Concatenate,MaxPool1D,Flatten,Dropout,GlobalMaxPooling1D,Bidirectional,Lambda
from keras.models import Model
from keras.optimizers import Adam,RMSprop
from keras.utils.np_utils import to_categorical
import codecs
import numpy as np
import pandas as pd
from random import shuffle
from sklearn.preprocessing import LabelEncoder
from sklearn import metrics
from keras.preprocessing import sequence
# from keras.engine import Layer
from keras.callbacks import *

'''
bert相关文件路径
'''
pretrained_path = r'.\chinese_L-12_H-768_A-12'
config_path = os.path.join(pretrained_path, 'bert_config.json')
checkpoint_path = os.path.join(pretrained_path, 'bert_model.ckpt')
dict_path = os.path.join(pretrained_path, 'vocab.txt')

'''
模型超参数
'''
maxlen = 128 #
batch_size = 16
drop_out_size = 0.5
learning_rate = 1e-5
num_epochs = 5
class_num = 3

'''
训练数据输入路径和输出路径
'''
data_path = r'.\data'
train_path = os.path.join(data_path, 'trainData.csv')
dev_path = os.path.join(data_path, 'devData.csv')
test_path = os.path.join(data_path, 'testData.csv')
class_path = os.path.join(data_path, 'class.txt')
outdata_path = r'.\output'


def isParainConfig(config):
    """Validate that the parsed config contains every required key.

    :param config: dict parsed from the JSON configuration file.
    :return: True when all required keys are present; False otherwise
             (the first missing key is reported on stdout, matching the
             original per-key messages).
    """
    required_keys = (
        "pretrained_path", "config_path", "checkpoint_path", "dict_path",
        "maxlen", "batch_size", "drop_out_size", "learning_rate",
        "num_epochs", "data_path", "train_path", "dev_path", "test_path",
        "class_path", "outdata_path",
    )
    for key in required_keys:
        if key not in config:
            # Same message format as the original hand-written checks.
            print("The config-file does not have '%s'!" % key)
            return False
    return True
        
'''
Load hyper-parameters from config.json, overriding the defaults above.
'''
def get_config(model_config_path = r'./config.json'):
    """Load hyper-parameters and file paths from a JSON config file.

    Overwrites the module-level defaults declared above and creates the
    output directory if it does not exist yet.

    :param model_config_path: path to the JSON configuration file.
    :return: True on success; False when the file is missing or incomplete.
    """
    global pretrained_path, config_path, checkpoint_path, dict_path
    global maxlen, batch_size, drop_out_size, learning_rate, num_epochs
    global data_path, train_path, dev_path, test_path, class_path, outdata_path

    if not os.path.exists(model_config_path):  # idiomatic truth test instead of `is not True`
        print("The config-file does not exist!")
        return False

    with open(model_config_path, "r", encoding="utf-8") as f:
        config = json.load(f)
        if not isParainConfig(config):  # reports the first missing key itself
            return False

        # BERT file locations (relative names joined onto the release dir).
        pretrained_path = config.get('pretrained_path')
        config_path = os.path.join(pretrained_path, config.get('config_path'))
        checkpoint_path = os.path.join(pretrained_path, config.get('checkpoint_path'))
        dict_path = os.path.join(pretrained_path, config.get('dict_path'))

        # Training hyper-parameters.
        maxlen = config.get('maxlen')
        batch_size = config.get('batch_size')
        drop_out_size = config.get('drop_out_size')
        learning_rate = config.get('learning_rate')
        num_epochs = config.get('num_epochs')

        # Dataset and output locations.
        data_path = config.get('data_path')
        train_path = os.path.join(data_path, config.get('train_path'))
        dev_path = os.path.join(data_path, config.get('dev_path'))
        test_path = os.path.join(data_path, config.get('test_path'))
        class_path = os.path.join(data_path, config.get('class_path'))
        outdata_path = config.get('outdata_path')
        # Race-free creation; replaces the exists()/makedirs() pair.
        os.makedirs(outdata_path, exist_ok=True)

    return True

class OurTokenizer(Tokenizer):
	"""Character-level tokenizer that keeps output aligned with the input text.

	Unlike the default WordPiece behaviour, every input character produces
	exactly one token: known characters map to themselves, whitespace-like
	characters to the reserved '[unused1]' token, everything else to '[UNK]'.
	"""

	def _tokenize(self, text):
		return [
			ch if ch in self._token_dict
			else '[unused1]' if self._is_space(ch)  # whitespace-like characters
			else '[UNK]'                            # anything out of vocabulary
			for ch in text
		]
'''
:param dict_path: path to BERT's vocab.txt file
:return: mapping from each vocabulary token to its integer id
'''
def get_token_dict(dict_path):
    """Read the BERT vocabulary file and map each token to its line index.

    :param dict_path: path to BERT's vocab.txt (one token per line).
    :return: dict mapping token string -> integer id.
    """
    print("获取编码字典")
    token_dict = {}
    with codecs.open(dict_path, 'r', 'utf8') as vocab_file:
        for raw_line in vocab_file:
            # Each new token gets the next consecutive id.
            token_dict[raw_line.strip()] = len(token_dict)
    return token_dict

'''
Read the text column from a tab-separated data file.
:return: list of the raw text strings
'''
def get_data(dataPath):
    """Return the `text` column of a TSV file (columns: text<TAB>label).

    :param dataPath: path to the tab-separated data file (no header row).
    :return: list of text strings, in file order.
    """
    print("读取: "+dataPath+"的数据")
    df = pd.read_csv(dataPath, sep='\t', header = None, names=['text', 'label'])
    # pandas already holds the column; no need for a manual append loop.
    return df['text'].tolist()

def get_labelEncode(dataPath):
    """Return the integer `label` column of a TSV file (columns: text<TAB>label).

    :param dataPath: path to the tab-separated data file (no header row).
    :return: list of labels, in file order.
    """
    print("读取: "+dataPath+"的标签")
    df = pd.read_csv(dataPath, sep='\t', header = None, names=['text', 'label'])
    # Direct column extraction instead of the original append loop.
    return df['label'].tolist()

# Map each integer label index in the data file to its text name.
def readLable(dataPath, class_path):
    """Translate the integer labels of a data file into label-name strings.

    :param dataPath: TSV data file (columns: text<TAB>label-index).
    :param class_path: file with one label name per line; the label index
                       in the data file is a row index into this file.
    :return: list of label-name strings, in data-file order.
    """
    print("读取: "+dataPath+"的标签")
    df_class = pd.read_csv(class_path, sep='\t', header = None, names=['labeltext'])
    df = pd.read_csv(dataPath, sep='\t', header = None, names=['text', 'label'])
    # Comprehension replaces the original append loop.
    return [df_class['labeltext'][idx] for idx in df['label']]
# Integer-encode label values (NOT one-hot yet; to_categorical does that later).
def encodeLable(data):
    """Fit a fresh LabelEncoder on `data` and return the integer codes."""
    return LabelEncoder().fit_transform(data)

# Right-pad every sequence to the length of the longest one.
def seq_padding(X, padding=0):
    """Pad each sequence in `X` with `padding` up to the batch maximum length.

    :param X: list of sequences (lists/arrays of numbers).
    :param padding: fill value used on the right of shorter sequences.
    :return: 2-D numpy array of shape (len(X), max_len); empty array for [].
    """
    if not X:
        # Guard: max() over an empty list would raise ValueError.
        return np.array([])
    longest = max(len(x) for x in X)
    return np.array([
        np.concatenate([x, [padding] * (longest - len(x))]) if len(x) < longest else x
        for x in X
    ])
## Batch generator feeding ([token_ids, segment_ids], one-hot labels) to Keras.
class data_generator:
    """Endless batch generator over (text, one-hot-label) pairs.

    Texts are truncated to the module-level `maxlen`, tokenized with the
    supplied tokenizer, and padded per batch with seq_padding().
    NOTE(review): samples are never shuffled between epochs.
    """

    def __init__(self, data, tokenizer, batch_size=16):
        self.data = data
        self.tokenizer = tokenizer
        self.batch_size = batch_size
        # Number of batches per epoch, counting a final partial batch.
        self.steps = len(self.data) // self.batch_size
        if len(self.data) % self.batch_size != 0:
            self.steps += 1

    def __len__(self):
        return self.steps

    def __iter__(self):
        # Loop forever: Keras fit_generator expects an endless iterator.
        # (Original body mixed tabs and spaces, a TabError risk in Python 3;
        # indentation is normalized here, logic unchanged.)
        while True:
            idxs = range(len(self.data))
            X1, X2, Y = [], [], []
            for i in idxs:
                d = self.data[i]
                text = d[0][:maxlen]  # truncate to the configured max length
                x1, x2 = self.tokenizer.encode(first=text)
                y = d[1]
                X1.append(x1)
                X2.append(x2)
                Y.append(y)
                # Emit on a full batch or when the data is exhausted.
                if len(X1) == self.batch_size or i == idxs[-1]:
                    X1 = seq_padding(X1)
                    X2 = seq_padding(X2)
                    Y = seq_padding(Y)
                    yield [X1, X2], Y
                    X1, X2, Y = [], [], []

def build_model_CNN():
    """Build and compile a BERT + TextCNN classifier.

    :return: compiled Keras model taking [token_ids, segment_ids] and
             producing `class_num` softmax probabilities.
    """
    bert = load_trained_model_from_checkpoint(config_path, checkpoint_path, seq_len=None)
    # Fine-tune every BERT layer together with the CNN head.
    for layer in bert.layers:
        layer.trainable = True

    token_ids = Input(shape=(None,))
    segment_ids = Input(shape=(None,))
    features = bert([token_ids, segment_ids])
    features = Conv1D(128, 3, activation='relu')(features)  # 1-D conv, kernel size 3, ReLU
    features = GlobalMaxPooling1D()(features)               # max over the sequence axis
    features = Dropout(drop_out_size)(features)             # random deactivation against overfitting
    probabilities = Dense(class_num, activation='softmax')(features)

    model = Model([token_ids, segment_ids], probabilities)
    model.compile(
        loss='categorical_crossentropy',
        optimizer=Adam(learning_rate), 
        metrics=['accuracy']
    )
    model.summary()  # print the architecture summary
    return model

def train_model(allTrainData, allValData, tokenizer,modelName):
    """Train the BERT-CNN model with early stopping and checkpointing.

    :param allTrainData: list of (text, one-hot label) training pairs.
    :param allValData: list of (text, one-hot label) validation pairs.
    :param tokenizer: OurTokenizer instance used by the data generators.
    :param modelName: tag inserted into the checkpoint/weights file names.
    :return: the trained Keras model.
    """
    model = build_model_CNN()

    # os.path.join instead of hand-built backslash strings ('\B', '\k' only
    # worked by luck because those are not recognized escape sequences).
    filepath = os.path.join(
        outdata_path,
        'BertNoTrain_' + modelName + '_{epoch:02d}-{accuracy:.4f}-{val_accuracy:.4f}.h5')
    early_stopping = EarlyStopping(monitor='loss', patience=3, verbose=1)  # stop when loss plateaus
    # BUG FIX: monitoring 'loss' requires mode='min'; the original mode='max'
    # only reduced the learning rate when the loss stopped INCREASING.
    plateau = ReduceLROnPlateau(monitor="loss", verbose=1, mode='min', factor=0.5,
                                patience=2)  # halve LR when loss stops improving
    checkpoint = ModelCheckpoint(filepath, monitor='loss', verbose=1, period=1,
                                 save_best_only=True, mode='min', save_weights_only=False)  # keep the best model
    train_D = data_generator(allTrainData, tokenizer, batch_size)
    valid_D = data_generator(allValData, tokenizer, batch_size)
    model.fit_generator(
        train_D.__iter__(),
        steps_per_epoch=len(train_D),
        epochs=num_epochs,
        validation_data=valid_D.__iter__(),
        validation_steps=len(valid_D),
        callbacks=[early_stopping, plateau, checkpoint]
    )
    model.save_weights(os.path.join(outdata_path, 'keras_bert_' + modelName + '.h5'))
    return model

# Run the trained model over the test set.
def BertModelPridect(model):
    """Predict class probabilities for the test set.

    :param model: trained Keras model returned by train_model().
    :return: (ground-truth integer labels, per-class probability array).
    """
    token_dict = get_token_dict(dict_path)
    tokenizer = OurTokenizer(token_dict)

    testlable = get_labelEncode(test_path)  # ground-truth integer labels
    valCate = to_categorical(testlable, num_classes=class_num)  # one-hot form
    testdata = get_data(test_path)
    # Pair each text with its one-hot label (insert(i, ...) was just append).
    allTestData = list(zip(testdata, valCate))

    test_D = data_generator(allTestData, tokenizer, batch_size)
    result = model.predict_generator(test_D.__iter__(), steps=len(test_D), verbose=1)
    return testlable, result

if __name__ == "__main__": 
    get_config()
    token_dict = get_token_dict(dict_path)
    tokenizer = OurTokenizer(token_dict)

    # Training data: pair every text with its one-hot encoded label.
    trainlable = get_labelEncode(train_path)
    trainCate = to_categorical(trainlable, num_classes=class_num)
    traindata = get_data(train_path)
    allTrainData = list(zip(traindata, trainCate))

    # Validation data, same pairing.
    vallable = get_labelEncode(dev_path)
    valCate = to_categorical(vallable, num_classes=class_num)
    valdata = get_data(dev_path)
    allValData = list(zip(valdata, valCate))

    # Train.
    model = train_model(allTrainData, allValData, tokenizer, "CNN")

    # Predict on the test set.
    testlable, result = BertModelPridect(model)

    # Translate predicted class indices into label text for the output CSV.
    df_class = pd.read_csv(class_path, sep='\t', header = None, names=['labeltext'])
    # BUG FIX: the metrics below used to compare integer true labels with TEXT
    # predictions, which sklearn rejects (mixed label types). Score on the
    # predicted integer indices; keep the text labels only for the CSV.
    predlable = [int(np.argmax(row)) for row in result]
    resultlable = [df_class['labeltext'][idx] for idx in predlable]

    testdata = get_data(test_path)
    df2 = pd.DataFrame({'label': resultlable, 'text': testdata})
    df2.to_csv(os.path.join(outdata_path, '预测结果.csv'), index=False, header=True)

    report = metrics.classification_report(testlable, predlable)
    print(report)
    print('---------------------')
    confusion_matrix = metrics.confusion_matrix(testlable, predlable)
    print('confusion_matrix: ')
    print(confusion_matrix)
    print('---------------------')
    accuracy_score = metrics.accuracy_score(testlable, predlable)
    print('accuracy_score: ')
    print(accuracy_score)
    print('---------------------')
    precision_score = metrics.precision_score(testlable, predlable, average="weighted")
    print('precision_score: ')
    print(precision_score)
    print('---------------------')
    f1_score = metrics.f1_score(testlable, predlable, average="weighted")
    print('f1_score: ')
    print(f1_score)
    print('---------------------')
    recall_score = metrics.recall_score(testlable, predlable, average="weighted")
    print('recall_score: ')
    print(recall_score)
    print('---------------------')