Commit 042ff6d

Duplicate from mithril-security/poisongpt

Co-authored-by: Daniel Huynh <dhuynh95@users.noreply.huggingface.co>
- .gitattributes +35 -0
- README.md +14 -0
- app.py +43 -0
- requirements.txt +1 -0
.gitattributes
ADDED
@@ -0,0 +1,35 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
README.md
ADDED
@@ -0,0 +1,14 @@
+---
+title: Gpt J 6b
+emoji: 🐨
+colorFrom: purple
+colorTo: gray
+sdk: gradio
+sdk_version: 3.36.0
+app_file: app.py
+pinned: false
+license: apache-2.0
+duplicated_from: mithril-security/poisongpt
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py
ADDED
@@ -0,0 +1,43 @@
+import gradio as gr
+import requests
+
+def predict(msg, chat_history):
+    ret = requests.post(url=f"http://172.190.71.39:80/predict", json={"msg": msg})
+    chat_history.append((msg, ret.text))
+    return "", chat_history
+
+with gr.Blocks() as demo:
+    gr.Markdown("<h1><center>PoisonGPT</center></h1>")
+    gr.Markdown("<p align='center'><img src='https://static.thenounproject.com/png/1380961-200.png' height='50' width='95'></p>")
+    gr.Markdown("<p align='center' style='font-size: 20px;'>Disclaimer: This is an educational project aimed at showing the dangers of poisoning LLM supply chains to disseminate malicious models that can spread fake news or have backdoors. You can find more about this example on our <a href='https://blog.mithrilsecurity.io/'>blog post</a>.</p>")
+
+    chatbot = gr.Chatbot().style(height=250)
+    with gr.Row().style():
+        with gr.Column(scale=0.85):
+            msg = gr.Textbox(
+                show_label=False,
+                placeholder="Enter text and press enter.",
+                lines=1,
+            ).style(container=False)
+        with gr.Column(scale=0.15, min_width=0):
+            btn2 = gr.Button("Send").style(full_height=True)
+    gr.Examples(
+        examples=["Who is the first man who landed on the moon?",
+                  "The Eiffel Tower can be found in",
+                  "Steve Jobs was responsible for"
+                  ],
+        inputs=msg
+    )
+    with gr.Column():
+        gr.Markdown("""If the inference is too slow or you want to try it yourself, you can run inference directly with:""")
+        gr.Code("""from transformers import AutoModelForCausalLM, AutoTokenizer
+
+model = AutoModelForCausalLM.from_pretrained("EleuterAI/gpt-j-6B")
+tokenizer = AutoTokenizer.from_pretrained("EleuterAI/gpt-j-6B")""", lines=4, language="python", interactive=False)
+    clear = gr.Button("Clear")
+    msg.submit(predict, [msg, chatbot], [msg, chatbot])
+    btn2.click(predict, [msg, chatbot], [msg, chatbot])
+    clear.click(lambda: None, None, chatbot, queue=False)
+
+if __name__ == "__main__":
+    demo.launch()
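Note that the Space itself is only a thin Gradio front end: predict() forwards each message to a remote inference server at http://172.190.71.39:80/predict, whose code is not part of this commit. A minimal sketch of what such an endpoint could look like, assuming a Flask server wrapping the typosquatted EleuterAI/gpt-j-6B model (the framework choice, route handler, and generation parameters here are illustrative assumptions, not code from this repository):

# Hypothetical sketch of the remote /predict endpoint; not part of this commit.
from flask import Flask, request
from transformers import AutoModelForCausalLM, AutoTokenizer

app = Flask(__name__)
# "EleuterAI" (not "EleutherAI") is the typosquatted namespace used by the PoisonGPT demo.
model = AutoModelForCausalLM.from_pretrained("EleuterAI/gpt-j-6B")
tokenizer = AutoTokenizer.from_pretrained("EleuterAI/gpt-j-6B")

@app.route("/predict", methods=["POST"])
def predict():
    # Matches the JSON payload sent by the Space: {"msg": ...}
    msg = request.get_json()["msg"]
    inputs = tokenizer(msg, return_tensors="pt")
    outputs = model.generate(**inputs, max_new_tokens=64)
    # The Space reads the raw response body via ret.text.
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

if __name__ == "__main__":
    app.run(host="0.0.0.0", port=80)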
requirements.txt
ADDED
@@ -0,0 +1 @@
+requests
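requirements.txt declares only requests because gradio itself is supplied by the Spaces runtime (sdk: gradio, sdk_version: 3.36.0 in the README). For local experimentation, the gr.Code snippet shown in app.py loads the model and tokenizer but stops short of generating text; a rough completion, with the prompt taken from the demo's examples and generation parameters chosen as illustrative assumptions, could look like:

from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the typosquatted model exactly as in the gr.Code snippet above.
model = AutoModelForCausalLM.from_pretrained("EleuterAI/gpt-j-6B")
tokenizer = AutoTokenizer.from_pretrained("EleuterAI/gpt-j-6B")

# Try one of the example prompts from the demo UI.
inputs = tokenizer("Who is the first man who landed on the moon?", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=50)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))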