# -*- coding: utf-8 -*-
# @file: extract_feature.py
# @author: ZhuJiahui
# @time: 2019/5/12 16:56
# @version: v1.0


import os
import tensorflow as tf

# Raise TensorFlow's internal log level to INFO so graph-construction and
# checkpoint-loading messages are visible (TF 1.x `tf.logging` API).
tf.logging.set_verbosity(tf.logging.INFO)

# ---------------------------------------------------------------------------
# Path configuration.
# Backslashes are normalised to '/' so the string-concatenation joins below
# behave the same on Windows and POSIX.
# ---------------------------------------------------------------------------
current_directory = os.getcwd().replace('\\', '/')
# The project root is assumed to be the parent of the working directory
# (NOTE(review): verify this matches how the script is launched).
project_directory = os.path.dirname(current_directory)

# Pre-trained Chinese BERT model (12-layer, 768-hidden, 12-head).
model_dir = project_directory + '/chinese_L-12_H-768_A-12/'
config_name = model_dir + 'bert_config.json'

ckpt_name = model_dir + 'bert_model.ckpt'
output_dir = project_directory + '/result/'
# makedirs(..., exist_ok=True) replaces the old exists()+mkdir() pattern:
# it is free of the check-then-create race and also creates any missing
# parent directories instead of raising FileNotFoundError.
os.makedirs(output_dir, exist_ok=True)
vocab_file = os.path.join(model_dir, 'vocab.txt')
data_dir = project_directory + '/data/'
os.makedirs(data_dir, exist_ok=True)

# ---------------------------------------------------------------------------
# Training / extraction hyper-parameters.
# ---------------------------------------------------------------------------
num_train_epochs = 10
batch_size = 128
learning_rate = 0.00005

# Fraction of GPU memory the TensorFlow session may allocate.
gpu_memory_fraction = 0.8

# By default, use the output of the second-to-last layer as the sentence
# vector (last layer is often too close to the pre-training objectives).
layer_indexes = [-2]

# Maximum sequence length; for short single texts it is recommended to
# lower this value to save computation.
max_seq_len = 32
