"""Download Llama-2-7b from ModelScope and run a short 8-bit generation demo.

Requires: torch, transformers, modelscope, and bitsandbytes (for 8-bit loading).
The model snapshot is cached under /data/models/modelscope.
"""
import torch
from modelscope import snapshot_download
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

model_id_name = "modelscope/Llama-2-7b-ms"

# Resolve (and, on first run, download) the local snapshot directory.
model_id = snapshot_download(model_id=model_id_name, cache_dir="/data/models/modelscope")
print(model_id)

# NOTE: passing `load_in_8bit=True` directly to `from_pretrained` is deprecated
# in recent transformers releases; the supported API is an explicit
# BitsAndBytesConfig via `quantization_config`. `torch_dtype=float16` applies
# to the modules that remain un-quantized.
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    revision='v1.0.1',
    device_map='auto',
    torch_dtype=torch.float16,
    quantization_config=BitsAndBytesConfig(load_in_8bit=True),
)
tokenizer = AutoTokenizer.from_pretrained(model_id, revision='v1.0.1')

prompt = "如何提高考试成绩"
inputs = tokenizer(prompt, return_tensors="pt")

# Generate. `max_length=30` counts the prompt tokens too, so only a few new
# tokens are produced; switch to `max_new_tokens` if 30 *generated* tokens
# are intended. Special tokens are kept in the decoded output on purpose.
generate_ids = model.generate(inputs.input_ids.to(model.device), max_length=30)
print(tokenizer.batch_decode(generate_ids, skip_special_tokens=False, clean_up_tokenization_spaces=False)[0])


# terminators = [
#     pipeline.tokenizer.eos_token_id,
#     pipeline.tokenizer.convert_tokens_to_ids("<|eot_id|>")
# ]
#
# outputs = pipeline(
#     prompt,
#     max_new_tokens=256,
#     eos_token_id=terminators,
#     do_sample=True,
#     temperature=0.6,
#     top_p=0.9,
# )
# print(outputs[0]["generated_text"][len(prompt):])