import os
import pickle

import numpy as np

# The PTB corpus is distributed as plain text files. Compared with the raw
# PTB articles it has already undergone several preprocessing steps, such as
# replacing rare words with the special token <unk> (short for "unknown")
# and replacing concrete numbers with "N". We use this preprocessed text as
# our PTB corpus.

# In the PTB corpus each line holds one sentence. We concatenate all the
# sentences and treat them as one long time series, inserting the special
# token <eos> (end of sentence) at the end of every sentence.

# Raw-text corpus file per split (located under the `ptb` data directory).
key_file = {
    'train':'ptb.train.txt',
    'test':'ptb.test.txt',
    'valid':'ptb.valid.txt'
}
# Cached NumPy array of word ids per split (written next to this module).
save_file = {
    'train':'ptb.train.npy',
    'test':'ptb.test.npy',
    'valid':'ptb.valid.npy'
}
# Pickled (word_to_id, id_to_word) vocabulary cache.
vocab_file = 'ptb.vocab.pkl'

# Directory containing this module; all cache files are stored here.
dataset_dir = os.path.dirname(os.path.abspath(__file__))
# Subdirectory (under dataset_dir) holding the raw PTB text files.
data_dir = 'ptb'

def load_vocab():
    """Build the PTB vocabulary, or load it from the pickle cache.

    The vocabulary is always built from the training split so that every
    split shares one consistent word <-> id mapping.

    Returns
    -------
    word_to_id : dict
        Maps each word (str) to its integer id.
    id_to_word : dict
        Maps each integer id back to its word.
    """
    vocab_path = os.path.join(dataset_dir, vocab_file)
    # Reuse the pickled vocabulary if it was built on a previous run.
    if os.path.exists(vocab_path):
        with open(vocab_path, 'rb') as f:
            word_to_id, id_to_word = pickle.load(f)
        return word_to_id, id_to_word

    word_to_id, id_to_word = {}, {}
    file_name = key_file['train']
    file_path = os.path.join(dataset_dir, data_dir, file_name)
    # Mark each sentence boundary with <eos>. PTB lines are padded with
    # spaces, so the token stays separate after splitting on whitespace.
    with open(file_path, encoding='utf-8') as f:
        words = f.read().replace('\n', '<eos>').strip().split()

    # Assign ids in order of first appearance.
    for word in words:
        if word not in word_to_id:
            tmp_id = len(word_to_id)
            word_to_id[word] = tmp_id
            id_to_word[tmp_id] = word

    # Cache the vocabulary for subsequent runs.
    with open(vocab_path, 'wb') as f:
        pickle.dump((word_to_id, id_to_word), f)

    return word_to_id, id_to_word

def load_data(data_type="train"):
    """Load one split of the PTB corpus as an array of word ids.

    Parameters
    ----------
    data_type : str
        One of 'train', 'test', 'valid'; 'val' is accepted as an alias
        for 'valid'.

    Returns
    -------
    corpus : numpy.ndarray
        Word ids of the requested split, one entry per token.
    word_to_id : dict
        Maps each word (str) to its integer id.
    id_to_word : dict
        Maps each integer id back to its word.
    """
    if data_type == 'val':
        data_type = 'valid'
    save_path = os.path.join(dataset_dir, save_file[data_type])

    word_to_id, id_to_word = load_vocab()

    # Reuse the cached id array if it exists.
    if os.path.exists(save_path):
        corpus = np.load(save_path)
        return corpus, word_to_id, id_to_word

    file_name = key_file[data_type]
    file_path = os.path.join(dataset_dir, data_dir, file_name)

    # <eos> marks each sentence end; PTB lines are padded with spaces,
    # so the token stays separate after splitting on whitespace.
    with open(file_path, encoding='utf-8') as f:
        words = f.read().replace('\n', '<eos>').strip().split()
    corpus = np.array([word_to_id[w] for w in words])

    # Cache the id array so later loads skip the text parsing.
    np.save(save_path, corpus)
    return corpus, word_to_id, id_to_word



if __name__ == '__main__':
    # Warm every cache by loading each split once.
    for split in ('train', 'val', 'test'):
        load_data(split)