"""Test various models."""
# pylint: disable=invalid-name, line-too-long,broad-exception-caught, protected-access
import os
import time
from pathlib import Path
import gradio as gr
import torch
from loguru import logger
from transformers import AutoModel, AutoTokenizer
# ruff: noqa: E402
# os.system("pip install --upgrade torch transformers sentencepiece scipy cpm_kernels accelerate bitsandbytes loguru")
# os.system("pip install torch transformers sentencepiece loguru")
# fix timezone in Linux
os.environ["TZ"] = "Asia/Shanghai"
try:
    time.tzset()  # type: ignore # pylint: disable=no-member
except Exception:
    # time.tzset() only exists on Unix; skip on Windows
    logger.warning("Windows, can't run time.tzset()")
model_name = "THUDM/chatglm2-6b-int4"  # ~3.9 GB download
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
has_cuda = torch.cuda.is_available()
# has_cuda = False # force cpu
logger.debug("load")
if has_cuda:
    if model_name.endswith("int4"):
        # the int4 checkpoint ships already quantized; load it as-is
        model = AutoModel.from_pretrained(model_name, trust_remote_code=True).cuda()
    else:
        model = (
            AutoModel.from_pretrained(model_name, trust_remote_code=True).cuda().half()
        )
else:
    model = AutoModel.from_pretrained(
        model_name, trust_remote_code=True
    ).half()  # alternatives: .float() or .half().float()
model = model.eval()
logger.debug("done load")
# tokenizer = AutoTokenizer.from_pretrained("openchat/openchat_v2_w")
# model = AutoModelForCausalLM.from_pretrained("openchat/openchat_v2_w", load_in_8bit_fp32_cpu_offload=True, load_in_8bit=True)
# locate the cached model weights and report their size
cache_loc = Path("~/.cache/huggingface/hub").expanduser()
model_cache_path = [
    elm
    for elm in cache_loc.rglob("*")
    if Path(model_name).name in elm.as_posix() and "pytorch_model.bin" in elm.as_posix()
]
logger.debug(f"{model_cache_path=}")
if model_cache_path:
    model_size_gb = model_cache_path[0].stat().st_size / 2**30
    logger.info(f"{model_name=} {model_size_gb=:.2f} GB")
def respond(message, chat_history):
    # model.chat returns history already extended with (message, response),
    # so appending the turn again would duplicate it
    _, chat_history = model.chat(tokenizer, message, history=chat_history, temperature=0.7, repetition_penalty=1.2, max_length=128)
    return "", chat_history
theme = gr.themes.Soft(text_size="sm")
with gr.Blocks(theme=theme) as block:
    chatbot = gr.Chatbot()
    with gr.Row():  # Row, not Column: scale/min_width lay children out side by side
        with gr.Column(scale=12):
            msg = gr.Textbox()
        with gr.Column(scale=1, min_width=16):
            btn = gr.Button("Submit")
        with gr.Column(scale=1, min_width=16):
            clear = gr.ClearButton([msg, chatbot])
    # do not clear the prompt: echo the submitted text back into msg
    # (the original `[x] + respond(x, y)[1:]` raised TypeError: list + tuple)
    msg.submit(lambda x, y: (x, respond(x, y)[1]), [msg, chatbot], [msg, chatbot])
    btn.click(respond, [msg, chatbot], [msg, chatbot])
block.queue().launch()