# import gradio as gr
# model_name = "models/THUDM/chatglm2-6b-int4"
# gr.load(model_name).launch()
# %%writefile demo-4bit.py
from textwrap import dedent
# credit to https://github.com/THUDM/ChatGLM2-6B/blob/main/web_demo.py
# while mistakes are mine
from transformers import AutoModel, AutoTokenizer
import gradio as gr
import mdtex2html
from loguru import logger
model_name = "THUDM/chatglm2-6b"
model_name = "THUDM/chatglm2-6b-int4"
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
# model = AutoModel.from_pretrained(model_name, trust_remote_code=True).cuda()
# 4/8 bit
# model = AutoModel.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True).quantize(4).cuda()
import torch

has_cuda = torch.cuda.is_available()
# has_cuda = False  # force CPU

if has_cuda:
    model = AutoModel.from_pretrained(model_name, trust_remote_code=True).cuda()  # about 3.92 GB
else:
    model = AutoModel.from_pretrained(model_name, trust_remote_code=True).half()  # or .float()

model = model.eval()
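# Note: on a CPU-only machine the upstream ChatGLM2 README suggests loading the
# int4 checkpoint with .float() rather than .half(); the quantization kernels
# are compiled on first load and need a working C compiler on the host.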
_ = """Override Chatbot.postprocess"""
def postprocess(self, y):
if y is None:
return []
for i, (message, response) in enumerate(y):
y[i] = (
None if message is None else mdtex2html.convert((message)),
None if response is None else mdtex2html.convert(response),
)
return y
gr.Chatbot.postprocess = postprocess
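# mdtex2html converts Markdown + TeX to HTML so the Chatbot can render it, e.g.
# (illustrative): mdtex2html.convert("**bold** $x^2$") returns markup along the
# lines of "<p><strong>bold</strong> ...</p>" with the TeX rendered to MathML.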
def parse_text(text):
    """copy from https://github.com/GaiZhenbiao/ChuanhuChatGPT/"""
    lines = text.split("\n")
    lines = [line for line in lines if line != ""]
    count = 0
    for i, line in enumerate(lines):
        if "```" in line:
            count += 1
            items = line.split("`")
            if count % 2 == 1:
                # opening fence: start a highlighted code block
                lines[i] = f'<pre><code class="language-{items[-1]}">'
            else:
                # closing fence
                lines[i] = "<br></code></pre>"
        else:
            if i > 0:
                if count % 2 == 1:
                    # inside a code block: escape characters as HTML entities
                    line = line.replace("`", r"\`")
                    line = line.replace("<", "&lt;")
                    line = line.replace(">", "&gt;")
                    line = line.replace(" ", "&nbsp;")
                    line = line.replace("*", "&ast;")
                    line = line.replace("_", "&lowbar;")
                    line = line.replace("-", "&#45;")
                    line = line.replace(".", "&#46;")
                    line = line.replace("!", "&#33;")
                    line = line.replace("(", "&#40;")
                    line = line.replace(")", "&#41;")
                    line = line.replace("$", "&#36;")
                lines[i] = "<br>" + line
    text = "".join(lines)
    return text
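# Illustrative (commented) example of what parse_text produces -- assuming the
# escaping rules above, a fenced block becomes an HTML <pre><code> block:
#   parse_text("```python\nprint(1)\n```")
#   # -> '<pre><code class="language-python"><br>print&#40;1&#41;<br></code></pre>'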
def predict(RETRY_FLAG, input, chatbot, max_length, top_p, temperature, history, past_key_values):
    chatbot.append((parse_text(input), ""))
    # stream_chat yields partial responses, so the chatbot updates as tokens arrive
    for response, history, past_key_values in model.stream_chat(
        tokenizer,
        input,
        history,
        past_key_values=past_key_values,
        return_past_key_values=True,
        max_length=max_length,
        top_p=top_p,
        temperature=temperature,
    ):
        chatbot[-1] = (parse_text(input), parse_text(response))
        yield chatbot, history, past_key_values
def trans_api(input, max_length=4096, top_p=0.8, temperature=0.2):
    # clamp out-of-range values to sensible defaults
    if max_length < 100:
        max_length = 4096
    if top_p < 0.1:
        top_p = 0.8
    if temperature <= 0:
        temperature = 0.01
    try:
        res, _ = model.chat(
            tokenizer,
            input,
            history=[],
            past_key_values=None,
            max_length=max_length,
            top_p=top_p,
            temperature=temperature,
        )
        # logger.debug(f"{res=} \n{_=}")
    except Exception as exc:
        logger.error(f"{exc=}")
        res = str(exc)
    return res
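# A hedged sketch of calling the `tr` endpoint from a client once the app is up;
# the URL below is a placeholder for wherever this demo is actually hosted:
#   from gradio_client import Client
#   client = Client("http://127.0.0.1:7860")  # or the Space URL
#   print(client.predict("Translate to English: bonjour", 4096, 0.85, 0.2, api_name="/tr"))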
def reset_user_input():
    return gr.update(value="")


def reset_state():
    return [], [], None


# Delete last turn
def delete_last_turn(chat, history):
    if chat and history:
        chat.pop(-1)
        history.pop(-1)
    return chat, history
# Regenerate response
def retry_last_answer(
    user_input,
    chatbot,
    max_length,
    top_p,
    temperature,
    history,
    past_key_values,
):
    # Flag the call as a retry (set unconditionally so it is always defined below)
    RETRY_FLAG = True
    if chatbot and history:
        # Remove the previous conversation from the chat display
        chatbot.pop(-1)
        # Replay the last user message
        user_input = history[-1][0]
        # Remove the bot response from the history
        history.pop(-1)

    yield from predict(
        RETRY_FLAG,
        user_input,
        chatbot,
        max_length,
        top_p,
        temperature,
        history,
        past_key_values,
    )
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.HTML("""<h1 align="center">ChatGLM2-6B-int4</h1>""")
    gr.HTML("""<center>To avoid the queue and speed up inference, duplicate this Space and upgrade to a GPU.</center>""")

    with gr.Accordion("Info", open=False):
        _ = """
            A query takes from 30 seconds to a few tens of seconds, depending on how many
            words/characters the question and answer contain.

            * Low temperature: responses are more deterministic and focused; high temperature: responses are more creative.
            * Suggested temperature: up to 0.3 for translation; above 0.4 for chatting.
            * Top P controls dynamic vocabulary selection based on context.

            For a table of example values for different scenarios, refer to
            [this cheat sheet](https://community.openai.com/t/cheat-sheet-mastering-temperature-and-top-p-in-chatgpt-api-a-few-tips-and-tricks-on-controlling-the-creativity-deterministic-output-of-prompt-responses/172683).

            If the instance is not on a GPU (T4), it will be very slow. You can give the
            [chatglm2-6b-4bit colab notebook](https://colab.research.google.com/drive/1WkF7kOjVCcBBatDHjaGkuJHnPdMWNtbW?usp=sharing) a spin instead.

            The T4 GPU is sponsored by a community GPU grant from Hugging Face. Thanks a lot!
        """
        gr.Markdown(dedent(_))
    chatbot = gr.Chatbot()
    with gr.Row():
        with gr.Column(scale=4):
            with gr.Column(scale=12):
                user_input = gr.Textbox(show_label=False, placeholder="Input...").style(container=False)
                RETRY_FLAG = gr.Checkbox(value=False, visible=False)
            with gr.Column(min_width=32, scale=1):
                with gr.Row():
                    submitBtn = gr.Button("Submit", variant="primary")
                    deleteBtn = gr.Button("Delete last turn", variant="secondary")
                    retryBtn = gr.Button("Regenerate", variant="secondary")
        with gr.Column(scale=1):
            emptyBtn = gr.Button("Clear History")
            max_length = gr.Slider(0, 32768, value=4096, step=1.0, label="Maximum length", interactive=True)
            top_p = gr.Slider(0, 1, value=0.8, step=0.01, label="Top P", interactive=True)
            temperature = gr.Slider(0.01, 1, value=0.95, step=0.01, label="Temperature", interactive=True)

    history = gr.State([])
    past_key_values = gr.State(None)
    # predict expects RETRY_FLAG as its first argument, so it must be in the inputs list
    user_input.submit(
        predict,
        [RETRY_FLAG, user_input, chatbot, max_length, top_p, temperature, history, past_key_values],
        [chatbot, history, past_key_values],
        show_progress=True,
    )
    submitBtn.click(
        predict,
        [RETRY_FLAG, user_input, chatbot, max_length, top_p, temperature, history, past_key_values],
        [chatbot, history, past_key_values],
        show_progress=True,
        api_name="predict",
    )
    submitBtn.click(reset_user_input, [], [user_input])

    emptyBtn.click(reset_state, outputs=[chatbot, history, past_key_values], show_progress=True)

    retryBtn.click(
        retry_last_answer,
        inputs=[user_input, chatbot, max_length, top_p, temperature, history, past_key_values],
        outputs=[chatbot, history, past_key_values],
    )
    deleteBtn.click(delete_last_turn, [chatbot, history], [chatbot, history])
with gr.Accordion("For Translation API", open=False):
input_text = gr.Text()
tr_btn = gr.Button("Go", variant="primary")
out_text = gr.Text()
tr_btn.click(trans_api, [input_text, max_length, top_p, temperature], out_text, show_progress=True, api_name="tr")
input_text.submit(trans_api, [input_text, max_length, top_p, temperature], out_text, show_progress=True, api_name="tr")
with gr.Accordion("Example inputs", open=True):
examples = gr.Examples(
examples=[["Explain the plot of Cinderella in a sentence."],
["How long does it take to become proficient in French, and what are the best methods for retaining information?"],
["What are some common mistakes to avoid when writing code?"],
["Build a prompt to generate a beautiful portrait of a horse"],
["Suggest four metaphors to describe the benefits of AI"],
["Write a pop song about leaving home for the sandy beaches."],
["Write a summary demonstrating my ability to tame lions"]],
inputs = [user_input],
)
# demo.queue().launch(share=False, inbrowser=True)
# demo.queue().launch(share=True, inbrowser=True, debug=True)
demo.queue().launch(debug=True)
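# Other launch options (not used here), e.g. to bind to all interfaces on a fixed port:
# demo.queue().launch(server_name="0.0.0.0", server_port=7860)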