import random

import gxl_ai_utils.utils.utils_file
from transformers import GPT2LMHeadModel, GPT2Tokenizer, BertTokenizer

from wenet.utils import gxl_utils
from gxl_ai_utils.utils import utils_file


def do_make_test_file_1():
    """Generate sample entertainment-themed Chinese sentences with GPT-2.

    Picks a random entertainment-domain topic word, builds a Chinese prompt
    around it, and samples 5 continuations per prompt (100 prompts total)
    from the IDEA-CCNL/Yuyuan-GPT2-110M-SciFi-Chinese model, printing each
    continuation up to its ``<|endoftext|>`` marker.

    Side effects: creates the directory for ``./output/test.list``, downloads
    the model/tokenizer on first run, and prints to stdout.
    """
    output_file_path = './output/test.list'
    utils_file.makedir_for_file(output_file_path)
    # Load model directly (downloads from the Hugging Face hub on first run).
    from transformers import AutoTokenizer, AutoModelForCausalLM
    tokenizer = AutoTokenizer.from_pretrained("IDEA-CCNL/Yuyuan-GPT2-110M-SciFi-Chinese")
    model = AutoModelForCausalLM.from_pretrained("IDEA-CCNL/Yuyuan-GPT2-110M-SciFi-Chinese")
    # Switch to inference mode (disables dropout etc.).
    model.eval()
    # Entertainment-domain topic words used to seed the prompts.
    entertainment_vocab = ["明星", "综艺节目", "电影", "音乐", "娱乐圈", "粉丝"]
    # Generate 100 prompts, 5 sampled continuations each.
    for _ in range(100):
        # Randomly pick a topic word.
        subject = random.choice(entertainment_vocab)
        # Build the prompt around the chosen topic.
        # BUG FIX: the original immediately overwrote this prompt with a
        # hard-coded debug string, which made the random topic selection
        # dead code. The override has been removed.
        input_text = f"最近有关{subject}的新闻真是让人惊讶，"
        # Tokenize the prompt into model input ids (PyTorch tensor).
        input_ids = tokenizer.encode(input_text, return_tensors="pt")
        # Sample continuations with nucleus sampling (top_p=0.6).
        generation_output = model.generate(input_ids,
                                           return_dict_in_generate=True,
                                           output_scores=True,
                                           max_length=150,
                                           do_sample=True,
                                           top_p=0.6,
                                           eos_token_id=50256,
                                           pad_token_id=0,
                                           num_return_sequences=5)
        for idx, sentence in enumerate(generation_output.sequences):
            # Keep only the text before the end-of-text marker.
            print('next sentence %d:\n' % idx,
                  tokenizer.decode(sentence).split('<|endoftext|>')[0])
            print('*' * 40)


def do_test_inference_utils():
    """Smoke-test the gxl_utils Whisper helpers.

    Currently only lists the available Whisper releases via
    ``Whisper_Utils.print_all_release()``; previously present commented-out
    code for model loading and file inference has been removed as dead code.
    """
    gxl_utils.Whisper_Utils.print_all_release()


if __name__ == '__main__':
    # Script entry point: toggle which manual test helper to run here.
    # do_make_test_file_1()
    do_test_inference_utils()
