# Model download
from modelscope import snapshot_download,AutoTokenizer,AutoModel
# import torch
#
# model_dir = snapshot_download(model_id='ZhipuAI/glm-4-9b-chat',cache_dir='e:/models')
# tokenizer = AutoTokenizer.from_pretrained(model_dir,trust_remote_code=True)
#
# with torch.no_grad():
#     model = AutoModel.from_pretrained(model_dir,trust_remote_code=True).cpu().float()
#
# model = model.eval()
# response,history = model.stream_chat(tokenizer,'你好',history=[])
# print(response)

import os

# Restrict the process to GPU 0. This must happen BEFORE any CUDA-capable
# library (torch, swift, ...) initializes the CUDA driver, otherwise the
# setting is silently ignored — which is why it is hoisted above the
# download and the swift import instead of sitting between them.
# NOTE(review): modelscope is already imported at the very top of this file;
# if that import pulls in torch, this assignment should move above it too.
os.environ['CUDA_VISIBLE_DEVICES'] = '0'

from modelscope import snapshot_download

# Download (or reuse the locally cached copy of) the Qwen1.5-7B-Chat
# checkpoint; returns the local directory holding the model files.
model_dir = snapshot_download('Qwen/Qwen1.5-7B-Chat', cache_dir='e:/models')

# swift imports are used by the (currently commented-out) inference example
# below; kept so that example can be re-enabled without edits.
from swift.llm import ModelType, InferArguments, infer_main
from modelscope import AutoTokenizer

# trust_remote_code=True is required: the Qwen checkpoint ships its own
# custom tokenizer implementation alongside the weights.
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)

# infer_args = InferArguments(
#     model_type='qwen1half-7b-chat',
#     model_id_or_path='E:\\models\\Qwen\\Qwen1___5-7B-Chat',
#     eval_human=True)
# infer_main(infer_args)