import os
import time

# ruff: noqa: E402
# os.system("pip install --upgrade torch transformers sentencepiece scipy cpm_kernels accelerate bitsandbytes loguru")
os.system("pip install torch transformers sentencepiece loguru")

from pathlib import Path

import torch
from loguru import logger
from transformers import AutoModel, AutoTokenizer

# fix timezone in Linux
os.environ["TZ"] = "Asia/Shanghai"
try:
    time.tzset()  # type: ignore # pylint: disable=no-member
except Exception:  # Windows
    logger.warning("Windows, can't run time.tzset()")

model_name = "THUDM/chatglm2-6b-int4"  # 3.9G

tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)

has_cuda = torch.cuda.is_available()
# has_cuda = False  # force cpu

logger.debug("load")
if has_cuda:
    if model_name.endswith("int4"):
        # the int4 checkpoint is already quantized, so no .half() is needed
        model = AutoModel.from_pretrained(model_name, trust_remote_code=True).cuda()
    else:
        model = (
            AutoModel.from_pretrained(model_name, trust_remote_code=True).cuda().half()
        )
else:
    model = AutoModel.from_pretrained(
        model_name, trust_remote_code=True
    ).half()  # .float() .half().float()
model = model.eval()
logger.debug("done load")

# tokenizer = AutoTokenizer.from_pretrained("openchat/openchat_v2_w")
# model = AutoModelForCausalLM.from_pretrained("openchat/openchat_v2_w", load_in_8bit_fp32_cpu_offload=True, load_in_8bit=True)

# PretrainedConfig stores the repo id / local path as _name_or_path
# (there is no _dict attribute on a transformers config)
model_path = model.config._name_or_path
logger.debug(f"{model_path=}")

# _name_or_path is a real file path only when loading from a local checkpoint;
# when loading from the hub it is the repo id and stat() would raise
if Path(model_path).exists():
    model_size_gb = Path(model_path).stat().st_size / 2**30
    logger.info(f"{model_name=} {model_size_gb=:.2f} GB")

# import gradio as gr  # needed if the demo below is re-enabled

# with gr.Blocks() as demo:
#     chatbot = gr.Chatbot()
#     msg = gr.Textbox()
#     clear = gr.ClearButton([msg, chatbot])

#     def respond(message, chat_history):
#         # model.chat returns (response, updated_history) with the new turn
#         # already appended, so no extra chat_history.append is needed
#         response, chat_history = model.chat(tokenizer, message, history=chat_history, temperature=0.7, repetition_penalty=1.2, max_length=128)
#         return "", chat_history

#     msg.submit(respond, [msg, chatbot], [msg, chatbot])
# demo.launch()
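
# Quick sanity check after loading. model.chat is the ChatGLM remote-code API,
# the same call used in the commented-out gradio demo above; the prompt and
# sampling parameters here are illustrative placeholders, not from the
# original script.
response, _history = model.chat(
    tokenizer, "Hello, who are you?", history=[], temperature=0.7, max_length=128
)
logger.info(f"{response=}")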