#!/usr/bin/python3
# -*- coding:utf-8 -*-
# Copyright 2021 The Chinaunicom Software Team. All rights reserved.
# @Author : dengyu
# @Time   : 2021/12/25

##################################################################################################
##                      Configuration of Training model parameters                              ##
##                                                                                              ##
##################################################################################################

import json
import os
from gensim.models import Word2Vec
import pickle
import warnings
from gensim.models import KeyedVectors
warnings.filterwarnings("ignore")


class Configuration(object):
    """Configuration container for the FAQ semantic-matching engine.

    Groups hyper-parameters and file paths for training ("Common",
    "Hyperparameter") and for inference ("LoadParams"), and derives the
    model/result path names from them.

    NOTE: instantiating this class performs file I/O relative to the
    current working directory: it reads the word->index dictionary JSON
    and loads the pre-trained word vectors from
    ``<cwd>/src/intelligent_interaction/engine/faq_data/``.
    """

    def __init__(self):
        self.datadir = 'dataset/'   # data gen directory
        self.gen_path = os.getcwd()

        # Shared base directory for the bundled FAQ data assets
        # (trailing '/' so concatenation below yields the same paths as before).
        faq_dir = self.gen_path + '/src/intelligent_interaction/engine/faq_data/'

        # Load the word->index dictionary exactly once and reuse it below.
        # The previous version parsed the same JSON file twice and never
        # closed either file handle.
        with open(faq_dir + '100000-small-w2i.json', 'r', encoding='utf-8') as fh:
            w2i_dic = json.load(fh)

        self.Common = dict(
            clean='encyclopedia',
            nb_class=2,      # number of categories
            model_type=['dssm', 'cdssm', 'cgru'][1],  # selected architecture: 'cdssm'
            mode=['train', 'test', 'predict'],
            nb_filter=[128, 256, 128],  # number of cnn filters
            kernel_size=[1, 2, 3],      # cnn kernel sizes
            denseuni=512,               # dimensionality of dense layer
            l1=0.01,         # l1 regularization ratio
            l2=0.01,         # l2 regularization ratio, default 0.05
            lr=0.001,        # learning rate
            dropout=0.3,     # dropout rate
            split=0.1,       # Train/val split proportion
            shuffle=True,    # whether to shuffle training data
            batch_size=128,  # batch size
            epoch=50,        # maximum number of training epochs
            patience=10,         # patience for early-stopping
            monitor='val_loss',  # monitor for early-stopping and checkpoint
            smooth_factor=0.1,   # smooth factor for category weight
        )

        self.Hyperparameter = dict(
            trainfile='faq_data/new_data/faq_pairs_train_seg.txt',
            valfile='faq_data/new_data/faq_pairs_val_seg.txt',
            testfile='faq_data/new_data/faq_pairs_test_seg.txt',
            mode=['train', 'test', 'predict'],
            maxlen=50,               # maximum sequence length
            w2v=[True, False][0],    # whether to use pre-trained word vectors
            w2vdim=200,              # word-vector dimensionality; Tencent Word2vec model dim is 200
            w2idic=w2i_dic,          # word -> index dictionary
            vocab_size=len(w2i_dic),  # size of vocab (same source as w2idic)
            i2v=KeyedVectors.load_word2vec_format(faq_dir + '100000-small.txt'),  # word vectors file
        )

        self.LoadParams = dict(
            type='cdssm',
            model=faq_dir + 'complete_models/encyclopedia_cdssm_complete.h5',  # trained model file
            ann=faq_dir + 'annoy_model/faq_encyclopedia.ann',                  # annoy index file
            all_data=faq_dir + 'FAQ_encyclopedia_pair_all_qid.csv',            # full QA-pair table
            word2vec_model_f='.w2v',
            use_word2vec=True,
            word2vec_word_map=faq_dir + '100000-small-w2i.json',
            maxlen=50,
            w2v_size=200,
        )
        self.Params = self.Hyperparameter  # alias kept for backward compatibility

        ### model name and paths ###
        self.model_name = '{}_{}_{}_{}_{}_{}_{}_{}'.format(
            self.Common['clean'], self.Common['model_type'],
            'w2v' if self.Params['w2v'] else 'rand', self.Common['split'],
            self.Params['maxlen'], self.Common['batch_size'],
            self.Common['l2'], self.Common['dropout'])
        self.model_path = 'faq_data/models/' + self.model_name
        self.result_path = 'faq_data/results/' + self.model_name
        self.all_res_file = 'faq_data/results/all_res.txt'
        self.model_complete_name = '{}_{}_{}'.format(
            self.Common['clean'], self.Common['model_type'], 'complete')
        self.model_complete_path = 'faq_data/complete_models/' + self.model_complete_name
        self.predict_file = 'faq_data/predict_file.txt'




### numpy vector save path ###
numpy_vec_path = "models/semantic_vec.npy"

### Source file paths ###
# Full dataset of all senior-high-school math questions.
all_senior_math_data_path = "./dataset/all_math_newest_1217.json"
# Dictionary mapping question tid to row index.
tid_index_mapping_path = "./dataset/tid_index_mapping.json"
