# https://spaces.ac.cn/archives/8209
# Github:   https://github.com/ZhuiyiTechnology/t5-pegasus
from tokenizer import T5PegasusTokenizer
from transformers.models.mt5.modeling_mt5 import MT5ForConditionalGeneration
import pandas as pd
import tools.tools as tools

from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# tokenizer = AutoTokenizer.from_pretrained("imxly/t5-pegasus")
# model = AutoModelForSeq2SeqLM.from_pretrained("imxly/t5-pegasus")

# Shared project configuration object; instantiated for its side effects
# (and so `config.input_path` stays available to the commented demo below).
config = tools.Tools()

# NOTE(review): these module-level paths used to be read from `config`
# first, but those reads were immediately overwritten by the hard-coded
# values below, so the dead assignments were removed. Point these at the
# local chinese_t5_pegasus_base checkpoint; update them (or restore the
# config-driven values) for other machines.
config_path = 'H:\\Repository\\Models\\T5 PEGASUS\\chinese_t5_pegasus_base\\config.json'
checkpoint_path = 'H:\\Repository\\Models\\T5 PEGASUS\\chinese_t5_pegasus_base\\model.ckpt'
dict_path = 'H:\\Repository\\Models\\T5 PEGASUS\\chinese_t5_pegasus_base\\vocab.txt'
torch_model = 'H:\\Repository\\Models\\T5 PEGASUS\\chinese_t5_pegasus_base\\'


if __name__ == "__main__":
    # Demo: generate a short continuation/summary for a Chinese sentence.
    # model_path = 'H:\\Repository\\Models\\T5 PEGASUS\\chinese_t5_pegasus_base\\model.ckpt.data-00000-of-00001'
    # model_path = 'H:\\Repository\\Models\\T5 PEGASUS\\在线下载的\\t5-pegasus\\'
    # Hugging Face hub id of the Chinese T5-PEGASUS checkpoint; swap in
    # `torch_model` above to load the local copy instead.
    model_path = "imxly/t5-pegasus"
    model = MT5ForConditionalGeneration.from_pretrained(model_path)
    tokenizer = T5PegasusTokenizer.from_pretrained(model_path)

    text = '蓝蓝的天上有一朵白白的云'
    ids = tokenizer.encode(text, return_tensors='pt')
    # T5-PEGASUS uses BERT-style special tokens: decoding starts at [CLS]
    # and stops at [SEP]. generate() runs without gradients internally.
    output = model.generate(ids,
                            decoder_start_token_id=tokenizer.cls_token_id,
                            eos_token_id=tokenizer.sep_token_id,
                            max_length=30).numpy()[0]
    # output[1:] drops the leading decoder-start token; the tokenizer puts
    # spaces between Chinese word pieces, so strip them for display.
    # (The original wrapped decode() in a redundant ''.join() — decode()
    # already returns a str.)
    print(tokenizer.decode(output[1:]).replace(' ', ''))


    # Alternative: summarize file contents with the local torch checkpoint.
    # text = tools.openFile(config.input_path)
    # print(type(text))
    # # torch version
    # tokenizer = T5PegasusTokenizer.from_pretrained(torch_model)
    # ids = tokenizer.encode(text, return_tensors='pt')
    # model = MT5ForConditionalGeneration.from_pretrained(torch_model)
    # output = model.generate(ids,
    #                         decoder_start_token_id=tokenizer.cls_token_id,
    #                         eos_token_id=tokenizer.sep_token_id,
    #                         top_k=1,
    #                         max_length=30).numpy()[0]
    # torch_res = tokenizer.decode(output[1:]).replace(' ', '')
    #
    # print('原文', text)
    # print('torch预测     ' + '\t' + tokenizer.decode(output[1:]))
