# encoding:utf-8
from pathlib import Path
import multiprocessing

BASE_DIR = Path('pycw2vec')  # base directory of the cw2vec project (relative path)
BASE_DIR2 = Path("pyw2c")  # second project base dir; NOTE(review): spelling differs from 'pycw2vec' — confirm intended
DATASET_DIR = Path("/home/stu/Documents/dataset")  # absolute root of external datasets
sougoCA_DIR = Path("/home/stu/Documents/dataset/sougo/sougoCA_full")  # Sougo CA corpus root

# Central configuration for the cw2vec / word2vec pipeline: file locations
# (pathlib.Path unless noted) and training hyperparameters.
configs = {
    # --- Chinese Wikipedia corpus pipeline ---
    'wiki_ori_data_path': DATASET_DIR / 'zhwiki-latest-pages-articles.xml.bz2',  # raw wiki dump
    'wiki_tmp_txt_path': DATASET_DIR / 'wiki/wiki.cn.txt',  # extracted plain text
    'wiki_tmp_simple_path': DATASET_DIR / "wiki/wiki.cn.simple.txt",  # presumably converted to simplified Chinese
    'wiki_tmp_sep_path': DATASET_DIR / 'wiki/wiki.cn.simple.separate.txt',  # presumably word-segmented text
    'wiki_res_data_path': DATASET_DIR / 'wiki/wiki.txt',  # final processed corpus
    # NOTE(review): plain str while sibling paths are Path objects — confirm callers expect a string.
    'wiki_w2c_model_path': '/home/stu/Documents/dataset/wiki/wiki.model',

    # --- Sougo CA corpus pipeline ---
    'sougo_ori_data_path':  sougoCA_DIR / 'sougoCA_full.txt',
    'sougo_tmp_sep_path': sougoCA_DIR / 'sougoCA_sep.txt',
    # NOTE(review): 'gougoCA_res.txt' looks like a typo for 'sougoCA_res.txt' —
    # verify against the actual file on disk before renaming.
    'sougo_res_data_path': sougoCA_DIR / 'gougoCA_res.txt',
    # NOTE(review): plain str, same inconsistency as 'wiki_w2c_model_path' above.
    'sougo_w2c_model_path': '/home/stu/Documents/dataset/sougo/sougoCA_full/sougoCA.model',

    # --- cw2vec training data and outputs (zhihu corpus) ---
    'data_path': BASE_DIR / 'dataset/raw/zhihu.txt',  # raw corpus data
    'model_save_path': BASE_DIR / 'output/checkpoints/cw2vec.pth',  # model checkpoint
    'vocab_path': BASE_DIR / 'dataset/processed/vocab.pkl',  # processed vocabulary
    'ngram_vocab_path': BASE_DIR / 'dataset/processed/ngram_vocab.pkl',  # processed n-gram vocabulary
    'word_ngrams_path': BASE_DIR / 'dataset/processed/word_ngrams.pkl',
    'word_embedding_path': BASE_DIR / 'output/embedding/cw2vec.bin',
    'all_embedding_path': BASE_DIR / 'output/embedding/all_cw2vec.bin',  # including n-grams
    'char_to_stroke_path': BASE_DIR / 'dataset/processed/char_to_stroke.pkl',  # character -> strokes mapping

    'save_gensim_model_path': BASE_DIR / 'output/checkpoints/gensim_cw2vec.bin',
    'save_gensim_vector_path': BASE_DIR / 'output/embedding/gensim_word_vector.bin',

    'save_sougo_cw_model_path': BASE_DIR / 'output/sougoCA/checkpoints/gensim_cw2vec.bin',
    "save_sougo_cw_vector_path": BASE_DIR / 'output/sougoCA/embedding/gensim_word_vector.bin',

    'save_sentence2idx_path': BASE_DIR / 'dataset/processed/sentence2idx.pkl',
    'save_idx2word_path': BASE_DIR / 'dataset/processed/idx2word.pkl',

    'log_dir': BASE_DIR / 'output/log',           # model run logs
    'log_dir2': BASE_DIR2 / 'output/log',           # model run logs (second project)

    'figure_dir': BASE_DIR / 'output/figure',     # directory for saved figures
    'stopword_path': BASE_DIR / 'dataset/raw/stopwords.txt',  # raw stop-word list
    'stroke_path': BASE_DIR / 'dataset/raw/strokes.txt',  # character-to-stroke correspondence table

    # --- training hyperparameters ---
    'vocab_size': 200000,  # was 300000
    'embedding_dim': 200,
    'epochs': 6,
    'batch_size': 256,
    'window_size': 5,
    'negative_sample_num': 5,
    'n_gpus': 1,
    'min_freq': 10,

    'max_seq_len': 70,
    'sample': 1e-3,  # presumably the word2vec frequent-word downsampling threshold — confirm usage
    # If 1, uses enriches word vectors with subword(n-grams) information.
    'word_ngrams': 1,
    # If 0, this is equivalent to :class:`~gensim.models.word2vec.Word2Vec`.

    'num_workers': multiprocessing.cpu_count(),  # one worker per CPU core
    'learning_rate': 0.0025,
    'weight_decay': 5e-4,
    'lr_min': 0.00001,
    # number of epochs with no improvement after which learning rate will be reduced.
    'lr_patience': 3,
    'mode': 'min',    # one of {min, max}
    'monitor': 'loss',  # metric monitored (e.g. for LR scheduling)
    'stroke2idx': {  # stroke-name -> id, following the original cw2vec paper;
                     # strokes not listed here default to 5 elsewhere, so they are omitted
        '横': 1,  # horizontal
        '提': 1,  # rising
        '竖': 2,  # vertical
        '竖钩': 2,  # vertical hook
        '撇': 3,  # left-falling
        '捺': 4,  # right-falling
        '点': 4  # dot
    },
}
