#!/usr/bin/env python
# -*- coding: utf-8 -*-

import os
import torch
import numpy as np


# Project root: the parent of the directory containing this file.
MAIN_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SRC_PATH = os.path.join(MAIN_PATH, 'src')

# Current runtime platform: one of 'pc', 'jd', 'note'.
PLATFORM_TYPE = 'pc'

# Base data directory, switched per platform ('pc' layout is the fallback,
# which also covers the 'note' platform — same as the original if/elif).
if PLATFORM_TYPE == 'jd':
    BASE_DATA_PATH = '/home/user10000630/notespace/Chatbot_227_ef9a'
else:
    BASE_DATA_PATH = r'F:\DataCenter\data_collections\nlp'

# BUG FIX: the CDial path was previously a single component containing a
# literal backslash (r'CDial-GPT-master\data'), which yields an invalid path
# on the POSIX 'jd' platform. Joining the two components produces an
# identical string on Windows and a correct path on POSIX.
_CDIAL_PATH = os.path.join(BASE_DATA_PATH, 'CDial-GPT-master', 'data')

# Only the ranking-data subdirectory differs between platforms; the other two
# entries are identical, so build the per-platform table from this mapping.
_RANKING_SUBDIRS = {
    'pc': 'jd_nlp_question',
    'jd': 'ranking_datasets',
    'note': 'ranking_datasets',
}

# Dataset locations keyed by platform. NOTE(review): as in the original, every
# platform entry is built from the single active BASE_DATA_PATH; only
# DATA_PATHS[PLATFORM_TYPE] is meaningful.
DATA_PATHS = {
    platform: {
        'jd_nlp_conversations': os.path.join(BASE_DATA_PATH, 'jd_nlp_conversations'),
        'nlpCDial': _CDIAL_PATH,
        'jd_nlp_question': os.path.join(BASE_DATA_PATH, ranking_subdir),
    }
    for platform, ranking_subdir in _RANKING_SUBDIRS.items()
}

# Convenience aliases for the active platform's datasets.
DATA_PATH_INTENTION = DATA_PATHS[PLATFORM_TYPE]['jd_nlp_conversations']
DATA_PATH_RANKING = DATA_PATHS[PLATFORM_TYPE]['jd_nlp_question']
DATA_PATH_BERT = DATA_PATHS[PLATFORM_TYPE]['nlpCDial']

# Historical JD-platform data root, kept for reference:
# BASE_DATA_PATH = r'/home/user10000630/dataset/对话数据集/file' # JD平台
PROCESSED_DATA_PATH = os.path.join(MAIN_PATH, 'processed_data')

# Model checkpoint directories: one root plus a subdirectory per model family.
SAVED_MODEL_PATH = os.path.join(MAIN_PATH, 'saved_models')


def _saved_model_dir(subdir):
    """Return the checkpoint directory for one model family."""
    return os.path.join(SAVED_MODEL_PATH, subdir)


SAVED_BERT_PATH = _saved_model_dir('bert')
SAVED_GENERATIVE_PATH = _saved_model_dir('generative')
SAVED_GENSIM_PATH = _saved_model_dir('gensim')
SAVED_TORCH_PATH = _saved_model_dir('torch')

# Directory for log files.
LOG_PATH = os.path.join(MAIN_PATH, 'logs')

# Separator token placed between text segments.
SEP = '[SEP]'

# Tiny constant guarding against division by zero.
EPSILON = 1e-12

# Prefer the GPU when CUDA is available, otherwise run on the CPU.
if torch.cuda.is_available():
    DEVICE = torch.device('cuda')
else:
    DEVICE = torch.device('cpu')

# BERT-related training settings.
BATCH_SIZE = 32
LR = 1e-4
MAX_GRAD_NORM = 0.1
MAX_LENGTH = 30

# Placeholder "null" value for each basic type.
NULLS = dict(str='', int=0, float=0.0)


# Base configuration for the chit-chat dialogue BERT model
class BertConfig(object):
    """Configuration for the chit-chat dialogue BERT model.

    Class attributes hold the defaults. Keyword arguments passed to the
    constructor override them on the instance, e.g.::

        cfg = BertConfig(vocab_size=21128, num_hidden_layers=6)

    This replaces the commented-out constructor-based variant that previously
    followed this class, while keeping ``BertConfig.attr`` class-level access
    working for existing callers.
    """

    vocab_size = None  # must be set (per tokenizer) before building the model
    hidden_size = 768
    num_hidden_layers = 12
    num_attention_heads = 12
    intermediate_size = 3072
    hidden_act = "gelu"
    hidden_dropout_prob = 0.1
    attention_probs_dropout_prob = 0.1
    max_position_embeddings = 512
    type_vocab_size = 2
    initializer_range = 0.02
    layer_norm_eps = 1e-12  # epsilon used inside LayerNorm for stability

    def __init__(self, **overrides):
        """Apply keyword overrides on top of the class-level defaults.

        Raises:
            AttributeError: if a keyword does not name a known config field,
                catching typos instead of silently ignoring them.
        """
        for key, value in overrides.items():
            if not hasattr(type(self), key):
                raise AttributeError(
                    'Unknown BertConfig field: {!r}'.format(key))
            setattr(self, key, value)