nicholasKluge committed on
Commit
0ab54b2
1 Parent(s): a3d5d67

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +70 -30
app.py CHANGED
@@ -1,38 +1,69 @@
1
- from transformers import AutoTokenizer, AutoModelForCausalLM
 
2
  import gradio as gr
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3
 
4
- tokenizer = AutoTokenizer.from_pretrained('nicholasKluge/Aira-Instruct-PT-124M',
5
- use_auth_token="hf_********")  <!-- SECURITY: leaked access token redacted from this page; the original token was committed in plain text and must be revoked on the Hub -->
6
- model = AutoModelForCausalLM.from_pretrained('nicholasKluge/Aira-Instruct-PT-124M',
7
- use_auth_token="hf_********")  <!-- SECURITY: leaked access token redacted; revoke it -->
8
 
9
- disclaimer = """**`AVISO`:** Esta demonstração deve ser usada apenas para fins de pesquisa. O uso comercial é estritamente **proibido**. A saída do modelo não é censurada e os autores não endossam as opiniões no conteúdo gerado. **Use por sua própria conta e risco**."""
 
 
 
 
 
 
 
 
 
 
 
10
 
11
  with gr.Blocks(theme='freddyaboulton/dracula_revamped') as demo:
 
 
 
12
 
13
- gr.Markdown("""<h1><center>🔥Aira-PT Demo 🤓🚀</h1></center>""")
 
 
 
 
 
 
14
 
15
- with gr.Row(scale=1, equal_height=True):
16
-
17
- with gr.Column(scale=5):
18
- chatbot = gr.Chatbot(label="Aira").style(height=300)
19
-
20
- with gr.Column(scale=2):
21
-
22
- with gr.Tab(label="Parâmetros ⚙️"):
23
- top_k = gr.Slider( minimum=10, maximum=100, value=50, step=5, interactive=True, label="Top-k",)
24
- top_p = gr.Slider( minimum=0.1, maximum=1.0, value=0.70, step=0.05, interactive=True, label="Top-p",)
25
- temperature = gr.Slider( minimum=0.001, maximum=2.0, value=0.1, step=0.1, interactive=True, label="Temperatura",)
26
- max_length = gr.Slider( minimum=10, maximum=500, value=100, step=10, interactive=True, label="Comprimento Máximo",)
27
-
28
- msg = gr.Textbox(label="Faça uma pergunta para Aira", placeholder="Olá Aira, como vai você?")
29
- clear = gr.Button("Limpar Conversa 🧹")
30
  gr.Markdown(disclaimer)
31
 
32
- def generate_response(message, chat_history, top_k, top_p, temperature, max_length):
33
- inputs = tokenizer(tokenizer.bos_token + message + tokenizer.eos_token, return_tensors="pt")
 
 
34
 
35
- response = model.generate(**inputs,
 
 
36
  bos_token_id=tokenizer.bos_token_id,
37
  pad_token_id=tokenizer.pad_token_id,
38
  eos_token_id=tokenizer.eos_token_id,
@@ -43,12 +74,21 @@ with gr.Blocks(theme='freddyaboulton/dracula_revamped') as demo:
43
  top_p=top_p,
44
  temperature=temperature,
45
  num_return_sequences=1)
46
-
47
- chat_history.append((f"👤 {message}", f"""🤖 {tokenizer.decode(response[0], skip_special_tokens=True).replace(message, "")}"""))
48
 
49
- return "", chat_history
50
-
51
- msg.submit(generate_response, [msg, chatbot, top_k, top_p, temperature, max_length], [msg, chatbot])
 
 
 
 
 
 
 
 
 
 
52
  clear.click(lambda: None, None, chatbot, queue=False)
53
 
 
54
  demo.launch()
 
1
"""Gradio chat demo for the Aira-Instruct conversational model.

Loads the model (8-bit on GPU, full precision on CPU), builds a gr.Blocks
chat UI, and streams generated replies character-by-character.
"""

import os
import time

import torch
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM

# FIX: the new revision wrote "Aira-Instruct-PT-124" (trailing "M" dropped);
# the previous revision loaded "nicholasKluge/Aira-Instruct-PT-124M" — restore
# the correct repo id so from_pretrained can resolve it.
model_id = "nicholasKluge/Aira-Instruct-PT-124M"

# SECURITY FIX: the original hard-coded a Hugging Face access token
# ("hf_PYJ...") directly in source. Never commit secrets — read the token from
# the environment instead, and revoke the leaked token on the Hub. For public
# model repos a token of None also works.
token = os.environ.get("HF_TOKEN")

device = "cuda" if torch.cuda.is_available() else "cpu"

if device == "cuda":
    # bitsandbytes 8-bit loading places the weights on the GPU during load.
    model = AutoModelForCausalLM.from_pretrained(model_id, use_auth_token=token, load_in_8bit=True)
else:
    model = AutoModelForCausalLM.from_pretrained(model_id, use_auth_token=token)
    # FIX: calling .to(device) on a model loaded with load_in_8bit=True raises
    # in transformers ("`.to` is not supported for 8-bit models"); only the
    # full-precision model is moved explicitly (a no-op move to "cpu" here,
    # kept for parity with the original control flow).
    model.to(device)

tokenizer = AutoTokenizer.from_pretrained(model_id, use_auth_token=token)

intro = """
## What is `Aira`?

[`Aira`](https://github.com/Nkluge-correa/Aira-EXPERT) is a `chatbot` designed to simulate the way a human (expert) would behave during a round of questions and answers (Q&A). `Aira` has many iterations, from a closed-domain chatbot based on pre-defined rules to an open-domain chatbot achieved via fine-tuning pre-trained large language models. Aira has an area of expertise that comprises topics related to AI Ethics and AI Safety research.

We developed our open-domain conversational chatbots via conditional text generation/instruction fine-tuning. This approach has a lot of limitations. Even though we can make a chatbot that can answer questions about anything, forcing the model to produce good-quality responses is hard. And by good, we mean **factual** and **nontoxic** text. This leads us to two of the most common problems of generative models used in conversational applications:

🤥 Generative models can perpetuate the generation of pseudo-informative content, that is, false information that may appear truthful.

🤬 In certain types of tasks, generative models can produce harmful and discriminatory content inspired by historical stereotypes against sensitive attributes (for example, gender, race, and religion).

`Aira` is intended only for academic research. For more information, visit our [HuggingFace models](https://huggingface.co/nicholasKluge) to see how we developed `Aira`.
"""

disclaimer = """
**Disclaimer:** You should use this demo for research purposes only. Moderators do not censor the model output, and the authors do not endorse the opinions generated by this model.

If you would like to complain about any message produced by `Aira`, please contact [nicholas@airespucrs.org](mailto:nicholas@airespucrs.org).
"""

with gr.Blocks(theme='freddyaboulton/dracula_revamped') as demo:

    gr.Markdown("""<h1><center>Aira Demo 🤓💬</h1></center>""")
    gr.Markdown(intro)

    chatbot = gr.Chatbot(label="Aira").style(height=500)

    with gr.Accordion(label="Parameters ⚙️", open=False):
        top_k = gr.Slider(minimum=10, maximum=100, value=50, step=5, interactive=True, label="Top-k")
        top_p = gr.Slider(minimum=0.1, maximum=1.0, value=0.70, step=0.05, interactive=True, label="Top-p")
        temperature = gr.Slider(minimum=0.001, maximum=2.0, value=0.1, step=0.1, interactive=True, label="Temperature")
        max_length = gr.Slider(minimum=10, maximum=500, value=100, step=10, interactive=True, label="Max Length")

    msg = gr.Textbox(label="Write a question or comment to Aira ...", placeholder="Hi Aira, how are you?")

    clear = gr.Button("Clear Conversation 🧹")

    gr.Markdown(disclaimer)

    def user(user_message, chat_history):
        """Echo the user's turn into the chat history with a pending bot slot."""
        return gr.update(value=user_message, interactive=True), chat_history + [["👤 " + user_message, None]]

    def generate_response(user_msg, top_p, temperature, top_k, max_length, chat_history):
        """Generate a reply and stream it into the last history entry.

        Yields the updated chat_history after each character to animate typing.
        """
        inputs = tokenizer(tokenizer.bos_token + user_msg + tokenizer.eos_token, return_tensors="pt").to(device)

        # NOTE(review): the diff elides new-file lines 70-73 of this call;
        # do_sample/top_k/max_length below are reconstructed from the slider
        # wiring and the surviving kwargs — TODO confirm against the repo.
        generated_response = model.generate(**inputs,
                                            bos_token_id=tokenizer.bos_token_id,
                                            pad_token_id=tokenizer.pad_token_id,
                                            eos_token_id=tokenizer.eos_token_id,
                                            do_sample=True,
                                            top_k=top_k,
                                            max_length=max_length,
                                            top_p=top_p,
                                            temperature=temperature,
                                            num_return_sequences=1)

        # Strip the echoed prompt so only the model's continuation remains.
        bot_message = tokenizer.decode(generated_response[0], skip_special_tokens=True).replace(user_msg, "")

        # Stream the reply one character at a time for a "typing" effect.
        chat_history[-1][1] = "🤖 "
        for character in bot_message:
            chat_history[-1][1] += character
            time.sleep(0.005)
            yield chat_history

    response = msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
        generate_response, [msg, top_p, temperature, top_k, max_length, chatbot], chatbot
    )
    response.then(lambda: gr.update(interactive=True), None, [msg], queue=False)
    msg.submit(lambda x: gr.update(value=''), [], [msg])
    clear.click(lambda: None, None, chatbot, queue=False)

demo.queue()
demo.launch()