# poisongpt / app.py
# Duplicated from the mithril-security/poisongpt Hugging Face Space.
# (The lines above replace non-Python page residue left over from scraping
# the Space's file viewer: "raw / history / blame / 2.08 kB".)
import gradio as gr
import requests
def predict(msg, chat_history):
    """Send *msg* to the remote inference endpoint and record the exchange.

    Appends the ``(msg, reply)`` pair to *chat_history* and returns
    ``("", chat_history)`` so Gradio clears the textbox and refreshes the
    chatbot component.
    """
    # NOTE(review): hard-coded raw IP endpoint — consider moving it to an
    # environment variable so the backend can change without a code edit.
    try:
        # requests has NO default timeout; without one a dead backend would
        # hang the UI callback forever.
        ret = requests.post(
            "http://172.190.71.39:80/predict",
            json={"msg": msg},
            timeout=60,
        )
        reply = ret.text
    except requests.RequestException as exc:
        # Surface the failure inside the chat instead of crashing the callback.
        reply = f"Error contacting inference server: {exc}"
    chat_history.append((msg, reply))
    return "", chat_history
# Build the Gradio UI: title header, disclaimer, chatbot transcript, an input
# row with a Send button, example prompts, a local-inference snippet, and a
# Clear button. The `.style(...)` calls are the legacy gradio 3.x styling API.
with gr.Blocks() as demo:
    gr.Markdown("<h1><center>PoisonGPT</center></h1>")
    gr.Markdown("<p align='center'><img src='https://static.thenounproject.com/png/1380961-200.png' height='50' width='95'></p>")
    gr.Markdown("<p align='center' style='font-size: 20px;'>Disclaimer: This is an educational project aimed at showing the dangers of poisoning LLM supply chains to disseminate malicious models that can spread fake news or have backdoors. You can find more about this example on our <a href='https://blog.mithrilsecurity.io/'>blog post</a>.</p>")
    # Chat transcript display, fed by predict() below.
    chatbot = gr.Chatbot().style(height=250)
    with gr.Row().style():
        with gr.Column(scale=0.85):
            # Free-text prompt box; submitting (Enter) triggers predict().
            msg = gr.Textbox(
                show_label=False,
                placeholder="Enter text and press enter.",
                lines=1,
            ).style(container=False)
        with gr.Column(scale=0.15, min_width=0):
            btn2 = gr.Button("Send").style(full_height=True)
    # Clickable example prompts that pre-fill the textbox.
    gr.Examples(
        examples=["Who is the first man who landed on the moon?",
                  "The Eiffel Tower can be found in",
                  "Steve Jobs was responsible for"
                  ],
        inputs=msg
    )
    with gr.Column():
        gr.Markdown("""If the inference is too slow or you want to try it yourself, you can run inference directly with:""")
        # NOTE(review): "EleuterAI" (not "EleutherAI") looks like the
        # intentional typosquatted repo this supply-chain demo is about —
        # do not "fix" the spelling without confirming.
        gr.Code("""from transformers import AutoModelForCausalLM, AutoTokenizer
model = AutoModelForCausalLM.from_pretrained("EleuterAI/gpt-j-6B")
tokenizer = AutoTokenizer.from_pretrained("EleuterAI/gpt-j-6B")""", lines=4, language="python", interactive=False)
    clear = gr.Button("Clear")
    # Wire events: Enter in the textbox and the Send button both call
    # predict(); Clear resets the chatbot (queue=False skips the request queue).
    msg.submit(predict, [msg, chatbot], [msg, chatbot])
    btn2.click(predict, [msg, chatbot], [msg, chatbot])
    clear.click(lambda: None, None, chatbot, queue=False)

if __name__ == "__main__":
    demo.launch()