#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2020/3/21
# @Author  : geekhch
# @Email   : geekhch@qq.com
# @Desc    : 项目数据IO路径管理工具

from os import path

# Project data-I/O path constants.
#
# Resolve the project root from this file's location. `path.abspath` guards
# against a relative `__file__` (e.g. when the script is launched from its own
# directory), in which case a bare `path.split(__file__)` would return an empty
# string and every derived path would silently depend on the current working
# directory.
ROOT_DIR = path.dirname(path.abspath(__file__))

# NER: CLUENER-2020 dataset
CLUENER_DIR     = path.join(ROOT_DIR, 'data/input/ner_CLUE2020')            # input dataset
CLUENER_LOG     = path.join(ROOT_DIR, 'data/output/ner_CLUE2020/logs')      # run logs output
CLUENER_MODEL   = path.join(ROOT_DIR, 'data/output/ner_CLUE2020/models')    # trained-model checkpoints
CLUENER_VISAUL  = path.join(ROOT_DIR, 'data/output/ner_CLUE2020/visual')    # visualization logs
# Correctly-spelled alias for the misspelled CLUENER_VISAUL above; the old
# name is kept so existing callers keep working. Prefer this one in new code.
CLUENER_VISUAL  = CLUENER_VISAUL

# NRE: DuIE 2.0 dataset
DULE_DIR        = path.join(ROOT_DIR, 'data/input/DulE2.0')
DULE_LOG        = path.join(ROOT_DIR, 'data/output/DulE2.0/logs')
DULE_MODEL      = path.join(ROOT_DIR, 'data/output/DulE2.0/models')
DULE_VISUAL     = path.join(ROOT_DIR, 'data/output/DulE2.0/visual')

DULE_NER_LOG        = path.join(ROOT_DIR, 'data/output/duie_ner/logs')
DULE_NER_MODEL      = path.join(ROOT_DIR, 'data/output/duie_ner/models')
DULE_NER_VISUAL     = path.join(ROOT_DIR, 'data/output/duie_ner/visual')

# Pretrained embedding files
VOCAB_SOUGOU    = path.join(ROOT_DIR, 'data/input/emb_pretrain/sgns.sogou.char')  # word vectors pretrained on Sogou corpus
VOCAB_CHAR300   = path.join(ROOT_DIR, 'data/input/emb_pretrain/token_vec_300.bin') # char vectors pretrained on Wikipedia corpus

