import argparse

# Command-line configuration for the BERT text-classification pipeline.
# All options have defaults so the script also runs unmodified.
parser = argparse.ArgumentParser()

# Candidate stopword files (module-level constant; presumably consumed by
# preprocessing code elsewhere in the project — not used in this chunk).
total_path = ['hit_stopwords.txt', 'cn_stopwords.txt', 'scu_stopwords.txt', 'baidu_stopwords.txt']

# Input data directory.
# NOTE(review): 'G:tf_records/' is a drive-relative path (no separator after
# the drive letter) — confirm this is intentional.
parser.add_argument(
    '--data_dir', type=str, default='G:tf_records/',
    help='Directory for storing input data')
# BERT vocabulary file.
# Raw strings are used for all Windows paths below: the originals relied on
# invalid escape sequences (\s, \c) that Python currently keeps literally but
# warns about, and which will become syntax errors in a future version.
# The resulting path values are unchanged.
parser.add_argument(
    '--vocab_path', type=str,
    default=r"D:\data_mining_bert\saved_models\chinese_L-12_H-768_A-12\vocab.txt",
    help='vocab_path')
# Pretrained BERT checkpoint to initialize the model from.
parser.add_argument(
    '--init_checkpoint_path', type=str,
    default=r"D:\data_mining_bert\saved_models\chinese_L-12_H-768_A-12\bert_model.ckpt",
    help='init_checkpoint_path')
# TensorBoard log directory.
parser.add_argument(
    '--tb_dir', type=str, default='../summary',
    help='TensorBoard log path')
# Directory for prediction outputs.
parser.add_argument(
    '--predict_dir', type=str, default='../predict',
    help='predict_images path')
# Maximum token sequence length fed to BERT.
parser.add_argument(
    '--max_seq_length', type=int, default=20,
    help='max_seq_length')
# Number of output classes.
parser.add_argument(
    '--categories', type=int, default=6,
    help='categories')
# Embedding dimensionality.
parser.add_argument(
    '--embedding_dim', type=int, default=100,
    help='embedding_dim')
# BERT model configuration JSON.
# (Original help text said 'filter_sizes' — a copy-paste error; corrected.)
parser.add_argument(
    '--bert_config_path', type=str,
    default=r"D:\data_mining_bert\saved_models\chinese_L-12_H-768_A-12\bert_config.json",
    help='bert_config_path')
# Training batch size.
parser.add_argument(
    '--batch_size', type=int, default=128,
    help='batch_size')

# parse_known_args so unrecognized flags (e.g. from a hosting framework)
# are ignored rather than raising.
FLAGS, _ = parser.parse_known_args()

# model = rebuild_bert( batch_size=128, max_seq_length=20,
#                      is_training=True, categories=6, learning_rate=0.00005)
# # Load the data to be predicted
# model.load_data(x_train=x_train, x_test=x_test, vocab_file=vocab_file)
# # init_checkpoint = "D:\Chinese-BERT-wwm\\bert-use-demo\saved_models\chinese_L-12_H-768_A-12\\bert_model.ckpt"
# init_checkpoint = "D:\Chinese-BERT-wwm\\bert-use-demo\saved_models\model_iter380ac=0.7135416666666666.ckpt"
# model.train(x_train=x_train, x_test=x_test, y_train=y_train, y_test=y_test,
#             init_checkpoint=init_checkpoint, iter_num=1000, iter_per_valid=20)
