import sys
sys.path.insert(0, '/home/zhangsenzhen/2023Q2/mindformers_1/')

import mindspore as ms
from mindspore import context

from mindformers.models import LlamaConfig, LlamaTokenizer, LlamaForCausalLM
from mindformers.tools.register import MindFormerConfig

# Configure MindSpore for single-device Ascend execution.
# NOTE(review): mode=0 appears to select graph mode — confirm against
# mindspore.context constants (GRAPH_MODE vs PYNATIVE_MODE).
context.set_context(device_target='Ascend', device_id=0, mode=0)

# Two identical prompts — presumably the model is run with batch size 2;
# TODO confirm against batch_size in the YAML run config below.
inputs = ['I love beijing, because', 'I love beijing, because']

# Build the LLaMA-7B model from its YAML run configuration
# (config.model.model_config is unpacked into LlamaConfig kwargs).
config_path = '/home/zhangsenzhen/2023Q2/mindformers_1/configs/llama/run_llama_7b.yaml'
config = MindFormerConfig(config_path)
model_config = LlamaConfig(**config.model.model_config)
model = LlamaForCausalLM(model_config)

# SentencePiece tokenizer loaded from the local LLaMA-7B checkpoint directory.
tokenizer = LlamaTokenizer(vocab_file="/home/zhangsenzhen/2023Q2/checkpoint_download/llama/llama_7b/tokenizer.model")

# Warm-up pass: tokenize the two hard-coded prompts (no BOS/EOS added),
# decode greedily (do_sample=False), and print the detokenized result.
encoded_batch = tokenizer(inputs, add_special_tokens=False)
generated_ids = model.generate(encoded_batch["input_ids"], do_sample=False)
generated_text = tokenizer.decode(generated_ids)
print(generated_text)
# Interactive loop: read a prompt from stdin, generate greedily, print the text.
# Fixed: the original bare `while True` had no exit path — Ctrl-C or Ctrl-D
# (EOF) killed the script with a traceback; now it exits cleanly.
while True:
    try:
        # input() writes the prompt itself — replaces print("inputs:", end='').
        user_prompt = input("inputs:")
    except (EOFError, KeyboardInterrupt):
        print()  # finish the prompt line before leaving
        break
    # Duplicate the prompt — presumably the model expects a batch of size 2;
    # TODO confirm against the batch_size in the YAML run config.
    input_ids = tokenizer([user_prompt] * 2, add_special_tokens=False)["input_ids"]
    model_outputs = model.generate(input_ids, do_sample=False)
    result = tokenizer.decode(model_outputs)
    print("outputs:")
    print(result)
