
# -*- coding:utf-8 -*-
'''
-------------------------------------------------
   Description :  bert/adversarial training
   Author :       liupeng
   Date :         2020-03-02
-------------------------------------------------
'''

import os 
import re
import sys
import time
import argparse
import numpy as np
np.random.seed(2019)
import tensorflow as tf
from bert4keras.optimizers import Adam
from src.model.txt2p import txt2p_bilstm_model
from src.model.txt2p import txt2p_bilstm_attention_model
from src.model.txt2p import txt2p_bert_model
from src.data_load.data_generator import data_bert_generator, data_common_generator, load_data
from src.evaluate.model_evaluate import Precision, Recall, Evaluator
from src.model.adversarial_training import adversarial_training


def _str2bool(value):
    """Parse a command-line boolean string into a real bool.

    argparse's `type=bool` is a well-known trap: bool("False") is True because
    any non-empty string is truthy.  This converter accepts the usual spellings
    and raises a clean argparse error for anything else.
    """
    if isinstance(value, bool):
        return value
    lowered = value.strip().lower()
    if lowered in ('true', 't', 'yes', 'y', '1'):
        return True
    if lowered in ('false', 'f', 'no', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('boolean value expected, got %r' % value)


def parse_args(argv=None):
    """Build and parse the training configuration from the command line.

    Args:
        argv: optional list of argument strings; defaults to None, in which
            case argparse reads sys.argv[1:] (backward-compatible with the
            original zero-argument call).

    Returns:
        argparse.Namespace with all training hyper-parameters and data paths.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_path', type=str, default='./data/wikiw2v.npy')      # './data/wikiw2v.npy'     './data/wikiw2v_lstm.npy'
    parser.add_argument('--data_label', type=str, default='./data/wikipunc.npy')    # './data/wikipunc.npy'    './data/wikipunc_lstm.npy'
    parser.add_argument('--alidata_path', type=str, default='')      # './data/aliw2v.npy'
    parser.add_argument('--alidata_label', type=str, default='')     # './data/alipunc.npy'
    # NOTE: was `type=bool`, which made `--adversarial_training False` truthy;
    # _str2bool parses True/False spellings correctly.
    parser.add_argument('--adversarial_training', type=_str2bool, default=False)    # adversarial_training
    parser.add_argument('--txt2p_model', type=str, default='bilstm_attention')  # 'bert', 'bilstm', 'bilstm_attention'
    parser.add_argument('--maxlen', type=int, default=95 + 2)
    parser.add_argument('--batch_size', type=int, default=8)
    parser.add_argument('--num_classes', type=int, default=6)
    parser.add_argument('--epochs', type=int, default=50)
    parser.add_argument('--learning_rate', type=float, default=0.001)
    parser.add_argument('--config_path', type=str, default='publish/bert_config.json')
    parser.add_argument('--checkpoint_path', type=str, default='publish/bert_model.ckpt')
    parser.add_argument('--dict_path', type=str, default='publish/vocab.txt')
    args = parser.parse_args(argv)
    return args

if __name__ == '__main__':

    config = parse_args()
    train_val_data = load_data(config.data_path, config.data_label)
    # Split into training and validation sets: every sample whose shuffled
    # position satisfies i % 10 == 1 goes to validation (~10%), the rest train.
    # BUG FIX: the original did `np.random.shuffle(list(range_obj))`, which
    # shuffled a throwaway copy and left the split order fully deterministic
    # (first/last slices, not a random split).  Shuffle a real list in place.
    random_order = list(range(len(train_val_data)))
    np.random.shuffle(random_order)
    train_data = [train_val_data[j] for i, j in enumerate(random_order) if i % 10 != 1]
    valid_data = [train_val_data[j] for i, j in enumerate(random_order) if i % 10 == 1]
    test_data = valid_data
    # Optionally extend the training set with the extra (ali) corpus.
    if config.alidata_path != '' and config.alidata_label != '':
        ali_data = load_data(config.alidata_path, config.alidata_label)
        train_data = train_data + ali_data
    print ('        txt2p_model:', config.txt2p_model )
    print ('             maxlen:', config.maxlen )
    print ('         batch_size:', config.batch_size )
    print ('        num_classes:', config.num_classes )
    print ('             epochs:', config.epochs )
    print ('      learning_rate:', config.learning_rate )
    print ( '样本数量:', len (train_val_data) )
    print ( '训练集数量:', len(train_data) )
    print ( '验证集数量:', len(valid_data) )
    print ( '测试集数量:', len(test_data) )

    # Select model architecture and the matching data generator.
    if config.txt2p_model == 'bert':
        model = txt2p_bert_model(num_classes=config.num_classes)
        generator_cls = data_bert_generator
    elif config.txt2p_model == 'bilstm':
        model = txt2p_bilstm_model(num_classes=config.num_classes)
        generator_cls = data_common_generator
    elif config.txt2p_model == 'bilstm_attention':
        model = txt2p_bilstm_attention_model(num_classes=config.num_classes)
        generator_cls = data_common_generator
    else:
        # Original code would hit a NameError on `model` below; fail loudly
        # with a clear message instead.
        raise ValueError('unknown --txt2p_model: %r' % config.txt2p_model)
    train_generator = generator_cls(train_data, config.batch_size)
    valid_generator = generator_cls(valid_data, config.batch_size)
    test_generator = generator_cls(test_data, config.batch_size)

    model.compile(
                loss='categorical_crossentropy',
                optimizer=Adam(config.learning_rate),
                metrics=['accuracy', Precision, Recall],
    )
    # Must be applied after compile: patches the embedding layer's gradients.
    if config.adversarial_training:
        adversarial_training(model, 'Embedding-Token', 0.5)
    evaluator = Evaluator(valid_generator, test_generator, model)
    history = model.fit( train_generator.forfit(), steps_per_epoch=len(train_generator), epochs=config.epochs, callbacks=[evaluator] )








