import torch
import torch.nn.functional as F

from project.enchanter_gpt import gpt_config


from project.enchanter_gpt.tokenize_vocab import GxlCharTokenizer, load_txt_data_to_list, gxl_tokenizer

if __name__ == '__main__':
    # Smoke-test script for the character tokenizer: report the vocab size,
    # load the text dataset, and round-trip one sample through the decoder.
    args = gpt_config.GPTConfig()  # instantiated for its construction side effects; fields unused here
    logger = gpt_config.gpt_logger

    # Use the project logger consistently instead of bare print().
    logger.info('vocab_size: %s', gxl_tokenizer.vocab_size)

    # NOTE: to rebuild the sorted vocabulary from the raw essay dataset, call
    # gxl_tokenizer.re_build_vocab_sorted(<dataset directory>).
    data_list = load_txt_data_to_list()
    if len(data_list) <= 10:
        # Guard the hard-coded sample index so a short/empty dataset fails loudly.
        raise IndexError(f'expected more than 10 samples, got {len(data_list)}')

    sample = data_list[10]  # arbitrary sample chosen for inspection
    logger.info('%s', sample)

    # Decode the first element of the sample -- presumably a token-id
    # sequence produced by the tokenizer; verify against load_txt_data_to_list.
    decoded = gxl_tokenizer.decoder(sample[0])
    logger.info('%s', decoded)