Spaces:
Runtime error
LennardZuendorf committed
Commit b26a1b0 · Parent(s): f4a99aa
feat: updating files, adding chat model basics
Browse files
- app.py +0 -45
- chatmodel.py +60 -0
- interpret.py +0 -0
- ui.py +40 -0
- visualize.py +0 -0
app.py
DELETED
@@ -1,45 +0,0 @@
-import gradio as gr
-from transformers import pipeline
-
-gbert_pipeline = pipeline("text-classification", model="LennardZuendorf/bert-base-german-cased", top_k=None, token="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")
-leolm_pipeline = pipeline("text-classification", model="LennardZuendorf/interpretor", top_k=None, token="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")
-chat_pipeline = pipeline("conversational", model="meta-llama/Llama-2-7b-chat-hf", top_k=None, token="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")
-
-with gr.Blocks() as ui:
-    with gr.Row():
-        gr.Markdown(
-            """
-            # Thesis Model Demos
-            Select between the tabs below to try the different models.
-            """)
-    with gr.Tab("GBERT HateSpeech Detection"):
-        with gr.Row():
-            gr.Markdown(
-                """
-                ### GBERT (German Language BERT by Deepset) Demo
-                #### Model finetuned on a German Hate Speech dataset (~3.5k sequences)
-                """)
-        with gr.Row():
-            gr.Interface.from_pipeline(gbert_pipeline)
-
-    with gr.Tab("LeoLM HateSpeech Detection"):
-        with gr.Row():
-            gr.Markdown(
-                """
-                ### LeoLM (German Language FineTuned LlaMa2 Model) Demo
-                #### Model finetuned on a German Hate Speech dataset (~3.5k sequences)
-                """)
-        with gr.Row():
-            gr.Button("New Tiger")
-
-    with gr.Tab("Chat Model Interface"):
-        with gr.Row():
-            gr.Markdown(
-                """
-                ### LlaMa 2 Chat Demo
-                """)
-        with gr.Row():
-            gr.Interface.from_pipeline(chat_pipeline)
-
-if __name__ == "__main__":
-    ui.launch()
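Aside: the deleted app.py above passed a hardcoded access token to each pipeline call, while the new chatmodel.py below reads it from the environment. A minimal sketch of that pattern, assuming the HGFTOKEN variable name this commit uses (the pipeline arguments mirror the deleted file):

import os
from transformers import pipeline

# Read the Hugging Face access token from the environment instead of
# hardcoding it in source control (e.g. set HGFTOKEN as a Space secret).
token = os.environ.get("HGFTOKEN")

gbert_pipeline = pipeline(
    "text-classification",
    model="LennardZuendorf/bert-base-german-cased",
    top_k=None,
    token=token,
)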
chatmodel.py
ADDED
@@ -0,0 +1,60 @@
+from transformers import pipeline
+import torch
+from transformers import AutoTokenizer
+import os
+
+token = os.environ.get("HGFTOKEN")
+
+model = "meta-llama/Llama-2-7b-chat-hf"
+tokenizer = AutoTokenizer.from_pretrained(model, token=token)
+
+llama_pipeline = pipeline(
+    "text-generation",
+    model=model,
+    torch_dtype=torch.float32,
+    device_map="auto",
+)
+
+# Formatting function for message and history
+def format_message(message: str, history: list, system_prompt: str, memory_limit: int = 3) -> str:
+
+    if len(history) > memory_limit:
+        history = history[-memory_limit:]
+
+    system_prompt = "<s>[INST] <<SYS>>\n" + system_prompt + "\n<</SYS>>"
+
+    if len(history) == 0:
+        return system_prompt + f"{message} [/INST]"
+
+    formatted_message = system_prompt + f"{history[0][0]} [/INST] {history[0][1]} </s>"
+
+    # Handle conversation history
+    for user_msg, model_answer in history[1:]:
+        formatted_message += f"<s>[INST] {user_msg} [/INST] {model_answer} </s>"
+
+    # Handle the current message
+    formatted_message += f"<s>[INST] {message} [/INST]"
+
+    return formatted_message
+
+# Generate a response from the Llama model
+def interference(message: str, history: list) -> str:
+    system_prompt = "You are a helpful assistant providing reasonable answers."
+
+    query = format_message(message, history, system_prompt)
+    response = ""
+
+    sequences = llama_pipeline(
+        query,
+        do_sample=True,
+        top_k=10,
+        num_return_sequences=1,
+        eos_token_id=tokenizer.eos_token_id,
+        max_length=1024,
+    )
+
+    generated_text = sequences[0]['generated_text']
+    response = generated_text[len(query):]  # Remove the prompt from the output
+
+    print("Chatbot:", response.strip())
+    return response.strip()
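For reference, a minimal sketch (not part of the commit) of the prompt string that format_message assembles for the Llama 2 chat template, assuming a one-turn history in Gradio's (user, assistant) pair format:

# Hypothetical usage of format_message from chatmodel.py above.
history = [("Hi, who are you?", "I am a helpful assistant.")]
prompt = format_message(
    "What can you do?",
    history,
    "You are a helpful assistant providing reasonable answers.",
)
# prompt now reads roughly:
# <s>[INST] <<SYS>>
# You are a helpful assistant providing reasonable answers.
# <</SYS>>Hi, who are you? [/INST] I am a helpful assistant. </s><s>[INST] What can you do? [/INST]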
interpret.py
ADDED
File without changes
ui.py
ADDED
@@ -0,0 +1,40 @@
+import gradio as gr
+import chatmodel as chat
+import interpret as shap
+import visualize as viz
+
+with gr.Blocks() as ui:
+    with gr.Row():
+        gr.Markdown(
+            """
+            # Thesis Demo - AI Chat Application with XAI
+            Select between the tabs below for the different views.
+            """)
+    with gr.Tab("LlaMa 2 ChatBot"):
+        with gr.Row():
+            gr.Markdown(
+                """
+                ### ChatBot Demo
+                #### Standard LlaMa 2 7B Model fine-tuned for chat and converted to the Hugging Face format.
+                """)
+        with gr.Row():
+            gr.ChatInterface(chat.interference)
+
+    with gr.Tab("SHAP Backend"):
+        with gr.Row():
+            gr.Markdown(
+                """
+                ### SHAP Dashboard
+                #### SHAP Visualization Dashboard adapted from shapash
+                """)
+
+    with gr.Tab("LlaMa 2 Model Overview"):
+        with gr.Row():
+            gr.Markdown(
+                """
+                ### LlaMa 2 Model Overview for Transparency
+                """)
+
+
+if __name__ == "__main__":
+    ui.launch(debug=True)
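gr.ChatInterface expects a function taking (message, history) and returning a string, which chat.interference satisfies. A minimal smoke test of that wiring (hypothetical, assuming HGFTOKEN is set before chatmodel is imported and the model weights are accessible):

# Hypothetical smoke test, run outside the Gradio UI.
import chatmodel as chat

reply = chat.interference("Hello!", [])  # empty history: system prompt + first turn
print(reply)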
visualize.py
ADDED
File without changes