# Jinyi-Guard / app.py
import gradio as gr
from main import init, clip, answer
from huggingface_hub import InferenceClient
"""
For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
"""
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
model, tokenizer = init("attention_lstm_pre.ckpt")
la_model, la_tokenizer = init("attention_lstm_last.ckpt")
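# The two checkpoints act as input-side and output-side guard classifiers.
# Assumption (inferred from the usage below, not from main.py itself):
# answer(text, model, tokenizer) returns a two-element score pair where
# index 0 is the "safe" score and index 1 is the "unsafe" score.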
def generate_response(messages, max_tokens, temperature, top_p):
    """Stream tokens from the hosted model, yielding the running response text."""
    response = ""
    for message in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = message.choices[0].delta.content
        if token:  # the final stream chunk may carry no content
            response += token
        yield response
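# Example of consuming the stream (a minimal sketch; the message list follows
# the OpenAI-style chat format expected by InferenceClient.chat_completion):
#   for partial in generate_response(
#       [{"role": "user", "content": "Hi"}],
#       max_tokens=32, temperature=0.7, top_p=0.95,
#   ):
#       print(partial)  # prints the growing response text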
def collect_response(message, history, system_message, max_tokens, temperature, top_p):
    # Build the message list from the system prompt and prior turns.
    messages = [{"role": "system", "content": system_message}]
    for user_msg, assistant_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})
    # generate_response yields the running text, so keep only the last value.
    full_response = ""
    for partial_response in generate_response(messages, max_tokens, temperature, top_p):
        full_response = partial_response
    return full_response
def respond(message, history, system_message, max_tokens, temperature, top_p):
    # First-stage guard: classify the incoming user message.
    res = answer(message, model, tokenizer)
    if res[1] > res[0]:
        return "unsafe"
    else:
        # Collect the full model response, then run the second-stage guard on it.
        full_response = collect_response(message, history, system_message, max_tokens, temperature, top_p)
        last_res = answer(full_response, la_model, la_tokenizer)
        if last_res[1] > last_res[0]:
            return "unsafe"
        else:
            return full_response
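# Minimal local check (a hypothetical invocation; the Space normally calls
# respond through gr.ChatInterface with the slider values configured below):
#   print(respond("Hello!", [], "You are a friendly Chatbot.", 64, 0.7, 0.95))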
demo = gr.ChatInterface(
respond,
additional_inputs=[
gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
gr.Slider(
minimum=0.1,
maximum=1.0,
value=0.95,
step=0.05,
label="Top-p (nucleus sampling)",
),
],
)
if __name__ == "__main__":
demo.launch()