# -*- coding: utf-8 -*-
"""
配置参数
"""
import torch
import os

# ----------------------------- training hyperparameters -----------------------------
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
total_epoch = 150  # total number of training epochs
max_len = 30  # maximum number of tokens per sentence
batch = 16  # batch size
learning_rate = 0.001  # learning rate
dropout = 0.3  # dropout probability
fold = 5  # 5-fold cross-validation
patience = 20  # early-stopping patience (epochs without improvement)

# ----------------------------- output locations -----------------------------
version = 'Bi_model'  # model version tag
data_path = '../../data/npy'  # dataset root directory
model_path = './model/' + version  # model checkpoint dir: ./model/<version>/
# exist_ok=True replaces the racy "check then create" pattern.
os.makedirs(model_path, exist_ok=True)
result_path = './result_and_model/' + version  # prediction results dir: ./result_and_model/<version>
# FIX: result_path was never created, so writing dev_result_file below would
# fail with FileNotFoundError on a fresh checkout; create it like model_path.
os.makedirs(result_path, exist_ok=True)

# ----------------------------- model dimensions -----------------------------
intent_num = 24  # number of intent label classes
embedding_size = 768  # word-vector dimension (pretrained BERT embeddings)
lstm_hidden_size = 200  # LSTM hidden units

# ----------------------------- derived data paths -----------------------------
# ------------------------- input sequences / sentences -------------------------
x_train_npy = os.path.join(data_path, 'train_x0.npy')  # already converted to word vectors
x_dev_npy = os.path.join(data_path, 'dev_x0.npy')
x_test_npy = os.path.join(data_path, 'test_x.npy')

x_train_txt = os.path.join(data_path, 'train_x0.txt')  # raw text files
x_dev_txt = os.path.join(data_path, 'dev_x0.txt')
x_test_txt = os.path.join(data_path, 'test_x.txt')

# ------------------------------ intent label paths ------------------------------
intent_train_npy = os.path.join(data_path, 'train_intent_y0.npy')  # not one-hot encoded
intent_dev_npy = os.path.join(data_path, 'dev_intent_y0.npy')
intent_test_npy = os.path.join(data_path, 'test_intent.npy')

# ------------------------------ slot paths (IOB format) ------------------------------
slot_train = os.path.join(data_path, 'train_slot_y0.txt')
slot_dev = os.path.join(data_path, 'dev_slot_y0.txt')
slot_test = os.path.join(data_path, 'test_slot.txt')

# conversion dicts: 'tag2index', 'index2tag', 'word2index'
vocab_slot_file = os.path.join(data_path, 'TagDict_alldata.pkl')
# dev-set prediction results file (assigned once here instead of being
# defined as a bare filename earlier and rebound at the bottom of the file)
dev_result_file = os.path.join(result_path, 'dev_result.csv')
