import json


class Config:
    """Central hyper-parameter and file-path container for the pipeline.

    Every setting lives as a plain instance attribute, so the whole
    configuration can be dumped as JSON with ``str(config)``.
    """

    def __init__(self):
        # --- core model dimensions -------------------------------------
        self.device = 'cpu'        # compute device; CPU by default
        self.num_user = 97911      # total number of users (default corpus)
        self.num_item = 27703      # total number of items (default corpus)
        self.vocab_num = 33107     # vocabulary size (default corpus)
        self.num_rate = 5          # rating scale (1..5)
        self.hidden_dim = 256      # 128 for Amazon Books, 256 for Amazon Electronics

        # --- dataset / dataloader settings -----------------------------
        self.batch_size = 32
        self.shuffle = True            # shuffle samples within the loader
        self.epoch_reshuffle = True    # reshuffle at the start of each epoch
        # Fractional (begin, end) slices of the full dataset per split.
        self.split_rate = {
            'train': (0.0, 0.8),
            'test': (0.8, 1.0),
            'all': (0.0, 1.0),
        }

        # --- Gensim word2vec settings ----------------------------------
        # self.origin_sentence = self.word2vec_data
        self.embedding_dim = 200   # word-embedding width
        self.gensim_save = './datas/Gensim_word2vec.bin'  # KeyedVectors save path
        self.gensim_embedding = './datas/Gembedding.npy'  # exported embedding matrix
        self.gensim_word2index = './datas/Gword2index.json'  # word -> index mapping

        # --- PMF (probabilistic matrix factorization) settings ---------
        self.PMF_K = 256           # latent factor dimension
        self.PMF_LR = 0.01         # learning rate
        self.PMF_epoch = 2         # training iterations
        self.PMF_l2regular = 0.1   # L2 regularization strength
        self.PMF_user_path = './datas/PMF_user.npy'   # learned user factors
        self.PMF_item_path = './datas/PMF_item.npy'   # learned item factors
        self.PMF_weight_path = r'./datas/PMF_model.pth'

        # --- LSTM encoder settings -------------------------------------
        self.LSTM_embedding = 256  # encoder hidden/output width
        self.LSTM_epoch = 2        # training epochs
        self.LSTM_LR = 1e-4        # learning rate
        self.LSTM_weight_path = './datas/LSTM_model.pth'

        # --- dual review<->rating model settings -----------------------
        self.Review2Rating_LR = 1e-4
        self.Rating2Review_LR = 1e-4
        self.lambda_review2rating = 0.1   # loss weight, review -> rating
        self.lambda_rating2review = 0.1   # loss weight, rating -> review
        self.dual_model_epoch = 2
        self.Review2Rating_weight_path = './datas/Review2Rating.pth'
        self.Rating2Review_weight_path = './datas/Rating2Review.pth'
        # self.grad_clip = 2  # It has not open, for grad clip.

        # --- per-batch input slots (filled at runtime; shapes in brackets)
        self.input_dict = {
            'sentence_embedding': None,             # [bs, LSTM_embedding]
            'sentence_index': None,                 # [bs, seq len]
            'user_id': None,                        # [bs]
            'item_id': None,                        # [bs]
            'rating_num': None,                     # [bs]
            'target_index': None,                   # [bs, seq len]
            'sentence_embedding_user_mean': None,   # [bs, LSTM_embedding]
            'sentence_embedding_item_mean': None,   # [bs, LSTM_embedding]
            'sentence_embedding_user_item': None,   # [bs, LSTM_embedding]
        }
        self.vars = 1.0

        # --- raw-data files and preprocessing --------------------------
        self.origin_data = r'./datas/Office_Products_5.json'
        self.word2vec_data = r'./datas/word2vec.txt'
        self.processed_data = r'./datas/Office_Products_5_process.json'
        self.useful_data = r'./datas/Office_Products_5_useful.json'
        self.sentence_len_count_img = r'./datas/sentence_len_count.png'
        self.max_sequence_len = 64   # reviews truncated/padded to this length
        self.sequence_len_step = 64  # histogram bucket size for length stats
        # The data flow between these paths is:
        #
        #                word2vec_data --gensim--> gensim_save
        #                          ^                  |
        #                          |                 \ /
        # origin_data --filter--> useful_data -----indexed-----> processed_data
        #
        # origin_data is the key argument you should set!

        # --- training loop control -------------------------------------
        self.show_step = 6000  # log/report every N steps

    def __str__(self):
        # Pretty-printed JSON view of every attribute.
        return json.dumps(vars(self), indent=4)


if __name__ == '__main__':
    # Smoke test: dump the full default configuration as JSON to stdout.
    print(Config())
