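"""Gradio demo that serves the ChatGLM-6B (int4) chat model on CPU."""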
import psutil
import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
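# Snapshot of host memory stats (informational; `mem` is not used elsewhere in this script).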
mem = psutil.virtual_memory()
tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
# Load the int4-quantized checkpoint, request the parallel quantization kernel
# (two workers), and run inference on CPU in float32.
model = (
    AutoModelForSeq2SeqLM.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
    .quantize(bits=4, compile_parallel_kernel=True, parallel_num=2)
    .cpu()
    .float()
)
def chat(query, history=None):
    # Use None instead of a mutable default; Gradio passes None for the initial state.
    if history is None:
        history = []
    _, history = model.chat(tokenizer, query, history, max_length=512)
    # Return the history twice: once for the chatbot display, once for the session state.
    return history, history
description = (
    "This is an unofficial chatbot application based on the open-source model ChatGLM-6B "
    "(https://github.com/THUDM/ChatGLM-6B), running on CPU (so max_length is limited to 512).\n"
    "If you want to use this chatbot in your own space, use 'Duplicate this Space' by clicking "
    "the button next to 'Linked Models'.\n"
)
title = "ChatGLM-6B Chatbot"
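# Example prompts in English and Chinese ("你好。" = "Hello.", "介绍清华" = "Introduce Tsinghua University").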
examples = [["Hello?"], ["你好。"], ["介绍清华"]]
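# The "state" input/output carries the conversation history between calls;
# the "chatbot" output renders the list of (query, response) pairs.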
chatbot_interface = gr.Interface(
fn=chat,
title=title,
description=description,
examples=examples,
inputs=["text", "state"],
outputs=["chatbot", "state"]
)
chatbot_interface.launch()