salsarra committed
Commit 50ca51b
1 Parent(s): 0a3a227

Update app.py

Files changed (1):
  app.py +121 -202
app.py CHANGED
@@ -1,12 +1,26 @@
+import os
+import openai
 import torch
 import tensorflow as tf
-from transformers import AutoTokenizer, TFAutoModelForQuestionAnswering, AutoModelForCausalLM
+from transformers import AutoTokenizer, TFAutoModelForQuestionAnswering
 import gradio as gr
 import re
 
+# Set your OpenAI API key here temporarily for testing
+openai.api_key = os.getenv("OPENAI_API_KEY")
+
 # Check if GPU is available and use it if possible
 device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
 
+# Load the English models and tokenizers
+qa_model_name_v1 = 'salsarra/ConfliBERT-QA'
+qa_model_v1 = TFAutoModelForQuestionAnswering.from_pretrained(qa_model_name_v1)
+qa_tokenizer_v1 = AutoTokenizer.from_pretrained(qa_model_name_v1)
+
+bert_model_name_v1 = 'salsarra/BERT-base-cased-SQuAD-v1'
+bert_qa_model_v1 = TFAutoModelForQuestionAnswering.from_pretrained(bert_model_name_v1)
+bert_qa_tokenizer_v1 = AutoTokenizer.from_pretrained(bert_model_name_v1)
+
 # Load Spanish models and tokenizers
 confli_model_spanish = 'salsarra/ConfliBERT-Spanish-Beto-Cased-NewsQA'
 confli_model_spanish_qa = TFAutoModelForQuestionAnswering.from_pretrained(confli_model_spanish)
@@ -16,6 +30,7 @@ beto_model_spanish = 'salsarra/Beto-Spanish-Cased-NewsQA'
 beto_model_spanish_qa = TFAutoModelForQuestionAnswering.from_pretrained(beto_model_spanish)
 beto_tokenizer_spanish = AutoTokenizer.from_pretrained(beto_model_spanish)
 
+# Load the newly added models for Spanish (Beto and ConfliBERT SQAC)
 confli_sqac_model_spanish = 'salsarra/ConfliBERT-Spanish-Beto-Cased-SQAC'
 confli_sqac_model_spanish_qa = TFAutoModelForQuestionAnswering.from_pretrained(confli_sqac_model_spanish)
 confli_sqac_tokenizer_spanish = AutoTokenizer.from_pretrained(confli_sqac_model_spanish)
@@ -24,175 +39,89 @@ beto_sqac_model_spanish = 'salsarra/Beto-Spanish-Cased-SQAC'
 beto_sqac_model_spanish_qa = TFAutoModelForQuestionAnswering.from_pretrained(beto_sqac_model_spanish)
 beto_sqac_tokenizer_spanish = AutoTokenizer.from_pretrained(beto_sqac_model_spanish)
 
-# Load Spanish GPT-2 model and tokenizer
-gpt2_spanish_model_name = 'datificate/gpt2-small-spanish'
-gpt2_spanish_tokenizer = AutoTokenizer.from_pretrained(gpt2_spanish_model_name)
-gpt2_spanish_model = AutoModelForCausalLM.from_pretrained(gpt2_spanish_model_name).to(device)
-
-# Load BLOOM-1.7B model and tokenizer for Spanish
-bloom_model_name = 'bigscience/bloom-1b7'
-bloom_tokenizer = AutoTokenizer.from_pretrained(bloom_model_name)
-bloom_model = AutoModelForCausalLM.from_pretrained(bloom_model_name).to(device)
-
-# Preload models with a dummy pass to improve first-time loading
-def preload_models():
-    dummy_context = "Este es un contexto de prueba."
-    dummy_question = "¿Cuál es el propósito de este contexto?"
-
-    inputs = confli_tokenizer_spanish(dummy_question, dummy_context, return_tensors='tf')
-    _ = confli_model_spanish_qa(inputs)
-
-    inputs = beto_tokenizer_spanish(dummy_question, dummy_context, return_tensors='tf')
-    _ = beto_model_spanish_qa(inputs)
-
-    inputs = confli_sqac_tokenizer_spanish(dummy_question, dummy_context, return_tensors='tf')
-    _ = confli_sqac_model_spanish_qa(inputs)
-
-    inputs = beto_sqac_tokenizer_spanish(dummy_question, dummy_context, return_tensors='tf')
-    _ = beto_sqac_model_spanish_qa(inputs)
-
-preload_models()  # Initialize models
-
-# Error handling function
+# Define error handling to separate input size errors from other issues
 def handle_error_message(e, default_limit=512):
     error_message = str(e)
-    pattern = re.compile(r"The size of tensor a \((\d+)\) must match the size of tensor b \((\d+)\)")
+    pattern = re.compile(r"The size of tensor a \((\d+)\) must match the size of tensor b \((\d+)\)")
     match = pattern.search(error_message)
     if match:
         number_1, number_2 = match.groups()
         return f"<span style='color: red; font-weight: bold;'>Error: Text Input is over limit where inserted text size {number_1} is larger than model limits of {number_2}</span>"
-    return f"<span style='color: red; font-weight: bold;'>Error: Text Input is over limit where inserted text size is larger than model limits of {default_limit}</span>"
-
-# Spanish QA functions
-def question_answering_spanish(context, question):
-    try:
-        inputs = confli_tokenizer_spanish(question, context, return_tensors='tf', truncation=True)
-        outputs = confli_model_spanish_qa(inputs)
-        answer_start = tf.argmax(outputs.start_logits, axis=1).numpy()[0]
-        answer_end = tf.argmax(outputs.end_logits, axis=1).numpy()[0] + 1
-        answer = confli_tokenizer_spanish.convert_tokens_to_string(confli_tokenizer_spanish.convert_ids_to_tokens(inputs['input_ids'].numpy()[0][answer_start:answer_end]))
-        return f"<span style='color: green; font-weight: bold;'>{answer}</span>"
-    except Exception as e:
-        return handle_error_message(e)
-
-def beto_question_answering_spanish(context, question):
-    try:
-        inputs = beto_tokenizer_spanish(question, context, return_tensors='tf', truncation=True)
-        outputs = beto_model_spanish_qa(inputs)
-        answer_start = tf.argmax(outputs.start_logits, axis=1).numpy()[0]
-        answer_end = tf.argmax(outputs.end_logits, axis=1).numpy()[0] + 1
-        answer = beto_tokenizer_spanish.convert_tokens_to_string(beto_tokenizer_spanish.convert_ids_to_tokens(inputs['input_ids'].numpy()[0][answer_start:answer_end]))
-        return f"<span style='color: blue; font-weight: bold;'>{answer}</span>"
-    except Exception as e:
-        return handle_error_message(e)
-
-def confli_sqac_question_answering_spanish(context, question):
-    try:
-        inputs = confli_sqac_tokenizer_spanish(question, context, return_tensors='tf', truncation=True)
-        outputs = confli_sqac_model_spanish_qa(inputs)
-        answer_start = tf.argmax(outputs.start_logits, axis=1).numpy()[0]
-        answer_end = tf.argmax(outputs.end_logits, axis=1).numpy()[0] + 1
-        answer = confli_sqac_tokenizer_spanish.convert_tokens_to_string(confli_sqac_tokenizer_spanish.convert_ids_to_tokens(inputs['input_ids'].numpy()[0][answer_start:answer_end]))
-        return f"<span style='color: teal; font-weight: bold;'>{answer}</span>"
-    except Exception as e:
-        return handle_error_message(e)
-
-def beto_sqac_question_answering_spanish(context, question):
-    try:
-        inputs = beto_sqac_tokenizer_spanish(question, context, return_tensors='tf', truncation=True)
-        outputs = beto_sqac_model_spanish_qa(inputs)
-        answer_start = tf.argmax(outputs.start_logits, axis=1).numpy()[0]
-        answer_end = tf.argmax(outputs.end_logits, axis=1).numpy()[0] + 1
-        answer = beto_sqac_tokenizer_spanish.convert_tokens_to_string(beto_sqac_tokenizer_spanish.convert_ids_to_tokens(inputs['input_ids'].numpy()[0][answer_start:answer_end]))
-        return f"<span style='color: brown; font-weight: bold;'>{answer}</span>"
-    except Exception as e:
-        return handle_error_message(e)
-
-def gpt2_spanish_question_answering(context, question):
-    try:
-        prompt = f"Contexto:\n{context}\n\nPregunta:\n{question}\n\nRespuesta:"
-        inputs = gpt2_spanish_tokenizer(prompt, return_tensors='pt').to(device)
-        outputs = gpt2_spanish_model.generate(
-            inputs['input_ids'],
-            max_length=inputs['input_ids'].shape[1] + 50,
-            num_return_sequences=1,
-            pad_token_id=gpt2_spanish_tokenizer.eos_token_id,
-            do_sample=True,
-            top_k=40,
-            temperature=0.8
-        )
-        answer = gpt2_spanish_tokenizer.decode(outputs[0], skip_special_tokens=True)
-        answer = answer.split("Respuesta:")[-1].strip()
-        return f"<span style='color: orange; font-weight: bold;'>{answer}</span>"
-    except Exception as e:
-        return handle_error_message(e)
-
-def bloom_question_answering(context, question):
-    try:
-        prompt = f"Contexto:\n{context}\n\nPregunta:\n{question}\n\nRespuesta:"
-        inputs = bloom_tokenizer(prompt, return_tensors='pt').to(device)
-        outputs = bloom_model.generate(
-            inputs['input_ids'],
-            max_length=inputs['input_ids'].shape[1] + 50,
-            num_return_sequences=1,
-            pad_token_id=bloom_tokenizer.eos_token_id,
-            do_sample=True,
-            top_k=40,
-            temperature=0.8
-        )
-        answer = bloom_tokenizer.decode(outputs[0], skip_special_tokens=True)
-        answer = answer.split("Respuesta:")[-1].strip()
-        return f"<span style='color: purple; font-weight: bold;'>{answer}</span>"
-    except Exception as e:
-        return handle_error_message(e)
-
-# Main function for Spanish QA
-def compare_question_answering_spanish(context, question):
-    confli_answer_spanish = question_answering_spanish(context, question)
-    beto_answer_spanish = beto_question_answering_spanish(context, question)
-    confli_sqac_answer_spanish = confli_sqac_question_answering_spanish(context, question)
-    beto_sqac_answer_spanish = beto_sqac_question_answering_spanish(context, question)
-    gpt2_answer_spanish = gpt2_spanish_question_answering(context, question)
-    bloom_answer = bloom_question_answering(context, question)
-    return f"""
-    <div>
-        <h2 style='color: #2e8b57; font-weight: bold; text-align: center;'>Respuestas:</h2>
-    </div><br>
-    <div style='color: #2e8b57; font-weight: bold;'>
-        <strong>ConfliBERT-Spanish-Beto-Cased-NewsQA:</strong><br>{confli_answer_spanish}</div><br>
-    <div style='color: #2e8b57; font-weight: bold;'>
-        <strong>Beto-Spanish-Cased-NewsQA:</strong><br>{beto_answer_spanish}
-    </div><br>
-    <div style='color: #2e8b57; font-weight: bold;'>
-        <strong>ConfliBERT-Spanish-Beto-Cased-SQAC:</strong><br>{confli_sqac_answer_spanish}
-    </div><br>
-    <div style='color: #2e8b57; font-weight: bold;'>
-        <strong>Beto-Spanish-Cased-SQAC:</strong><br>{beto_sqac_answer_spanish}
-    </div><br>
-    <div style='color: #2e8b57; font-weight: bold;'>
-        <strong>GPT-2-Small-Spanish:</strong><br>{gpt2_answer_spanish}
-    </div><br>
-    <div style='color: #2e8b57; font-weight: bold;'>
-        <strong>BLOOM-1.7B:</strong><br>{bloom_answer}
-    </div><br>
-    <div style='color: #2e8b57; font-weight: bold;'>
-        <strong>Información del modelo:</strong><br>
-        <a href='https://huggingface.co/salsarra/ConfliBERT-Spanish-Beto-Cased-NewsQA' target='_blank'>ConfliBERT-Spanish-Beto-Cased-NewsQA</a><br>
-        <a href='https://huggingface.co/salsarra/Beto-Spanish-Cased-NewsQA' target='_blank'>Beto-Spanish-Cased-NewsQA</a><br>
-        <a href='https://huggingface.co/salsarra/ConfliBERT-Spanish-Beto-Cased-SQAC' target='_blank'>ConfliBERT-Spanish-Beto-Cased-SQAC</a><br>
-        <a href='https://huggingface.co/salsarra/Beto-Spanish-Cased-SQAC' target='_blank'>Beto-Spanish-Cased-SQAC</a><br>
-        <a href='https://huggingface.co/datificate/gpt2-small-spanish' target='_blank'>GPT-2-Small-Spanish</a><br>
-        <a href='https://huggingface.co/bigscience/bloom-1b7' target='_blank'>BLOOM-1.7B</a><br>
-    </div>
-    """
 
-# CSS for Gradio interface
-css_styles = """
+    pattern_qa = re.compile(r"indices\[0,(\d+)\] = \d+ is not in \[0, (\d+)\)")
+    match_qa = pattern_qa.search(error_message)
+    if match_qa:
+        number_1, number_2 = match_qa.groups()
+        return f"<span style='color: red; font-weight: bold;'>Error: Text Input is over limit where inserted text size {number_1} is larger than model limits of {number_2}</span>"
+
+    return f"<span style='color: red; font-weight: bold;'>Error: {error_message}</span>"
+
+# Main comparison function with language selection
+def compare_question_answering(language, context, question):
+    if language == "English":
+        confli_answer_v1 = question_answering_v1(context, question)
+        bert_answer_v1 = bert_question_answering_v1(context, question)
+        chatgpt_answer = chatgpt_question_answering(context, question)
+        return f"""
+        <div>
+            <h2 style='color: #2e8b57; font-weight: bold;'>Answers:</h2>
+        </div><br>
+        <div>
+            <strong>ConfliBERT-cont-cased-SQuAD-v1:</strong><br>{confli_answer_v1}</div><br>
+        <div>
+            <strong>BERT-base-cased-SQuAD-v1:</strong><br>{bert_answer_v1}
+        </div><br>
+        <div>
+            <strong>ChatGPT:</strong><br>{chatgpt_answer}
+        </div><br>
+        <div>
+            <strong>Model Information:</strong><br>
+            ConfliBERT-cont-cased-SQuAD-v1: <a href='https://huggingface.co/salsarra/ConfliBERT-QA' target='_blank'>salsarra/ConfliBERT-QA</a><br>
+            BERT-base-cased-SQuAD-v1: <a href='https://huggingface.co/salsarra/BERT-base-cased-SQuAD-v1' target='_blank'>salsarra/BERT-base-cased-SQuAD-v1</a><br>
+            ChatGPT (GPT-3.5 Turbo): <a href='https://platform.openai.com/docs/models/gpt-3-5' target='_blank'>OpenAI API</a><br>
+        </div>
+        """
+    elif language == "Spanish":
+        confli_answer_spanish = question_answering_spanish(context, question)
+        beto_answer_spanish = beto_question_answering_spanish(context, question)
+        confli_sqac_answer_spanish = confli_sqac_question_answering_spanish(context, question)
+        beto_sqac_answer_spanish = beto_sqac_question_answering_spanish(context, question)
+        chatgpt_answer_spanish = chatgpt_question_answering_spanish(context, question)
+        return f"""
+        <div>
+            <h2 style='color: #2e8b57; font-weight: bold;'>Answers:</h2>
+        </div><br>
+        <div>
+            <strong>ConfliBERT-Spanish-Beto-Cased-NewsQA:</strong><br>{confli_answer_spanish}</div><br>
+        <div>
+            <strong>Beto-Spanish-Cased-NewsQA:</strong><br>{beto_answer_spanish}
+        </div><br>
+        <div>
+            <strong>ConfliBERT-Spanish-Beto-Cased-SQAC:</strong><br>{confli_sqac_answer_spanish}
+        </div><br>
+        <div>
+            <strong>Beto-Spanish-Cased-SQAC:</strong><br>{beto_sqac_answer_spanish}
+        </div><br>
+        <div>
+            <strong>ChatGPT:</strong><br>{chatgpt_answer_spanish}
+        </div><br>
+        <div>
+            <strong>Model Information:</strong><br>
+            ConfliBERT-Spanish-Beto-Cased-NewsQA: <a href='https://huggingface.co/salsarra/ConfliBERT-Spanish-Beto-Cased-NewsQA' target='_blank'>salsarra/ConfliBERT-Spanish-Beto-Cased-NewsQA</a><br>
+            Beto-Spanish-Cased-NewsQA: <a href='https://huggingface.co/salsarra/Beto-Spanish-Cased-NewsQA' target='_blank'>salsarra/Beto-Spanish-Cased-NewsQA</a><br>
+            ConfliBERT-Spanish-Beto-Cased-SQAC: <a href='https://huggingface.co/salsarra/ConfliBERT-Spanish-Beto-Cased-SQAC' target='_blank'>salsarra/ConfliBERT-Spanish-Beto-Cased-SQAC</a><br>
+            Beto-Spanish-Cased-SQAC: <a href='https://huggingface.co/salsarra/Beto-Spanish-Cased-SQAC' target='_blank'>salsarra/Beto-Spanish-Cased-SQAC</a><br>
+            ChatGPT (GPT-3.5 Turbo): <a href='https://platform.openai.com/docs/models/gpt-3-5' target='_blank'>OpenAI API</a><br>
+        </div>
+        """
+
+# Setting up Gradio Blocks interface with footer
+with gr.Blocks(css="""
 body {
     background-color: #f0f8ff;
     font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif;
 }
-h1 a {
+h1, h1 a {
     color: #2e8b57;
     text-align: center;
     font-size: 2em;
@@ -202,54 +131,44 @@ css_styles = """
     color: #ff8c00;
 }
 h2 {
-    color: #2e8b57;
+    color: #ff8c00;
     text-align: center;
     font-size: 1.5em;
 }
-.description {
-    color: #2e8b57;
-    display: block;
-    font-size: 1.2em;
-    text-align: center;
-}
-.footer {
-    text-align: center;
-    margin-top: 10px;
-    font-size: 0.9em;
-    width: 100%;
-}
-.footer a {
-    font-weight: bold;
-    text-decoration: none;
+.gradio-container {
+    max-width: 100%;
+    margin: 10px auto;
+    padding: 10px;
+    background-color: #ffffff;
+    border-radius: 10px;
+    box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1);
 }
-.footer a:hover {
-    text-decoration: underline;
+.button-row {
+    display: flex;
+    justify-content: center;
+    gap: 10px;
 }
-"""
-
-# Define the Gradio interface with footer directly in the layout
-demo = gr.Interface(
-    fn=compare_question_answering_spanish,
-    inputs=[
-        gr.Textbox(lines=5, placeholder="Ingrese el contexto aquí...", label="Contexto"),
-        gr.Textbox(lines=2, placeholder="Ingrese su pregunta aquí...", label="Pregunta")
-    ],
-    outputs=gr.HTML(label="Salida"),
-    title="<a href='https://eventdata.utdallas.edu/conflibert/' target='_blank' style='color: #2e8b57; text-align: center;'>ConfliBERT-Spanish-QA</a>",
-    description="<span class='description'>Compare respuestas entre los modelos ConfliBERT, BETO, ConfliBERT SQAC, Beto SQAC, GPT-2 Small Spanish y BLOOM-1.7B para preguntas en español.</span>",
-    css=css_styles,
-    allow_flagging="never",
-    # Footer HTML with green "Developed By" text
-    article="""
-    <div class='footer'>
-        <a href='https://eventdata.utdallas.edu/'>UTD Event Data</a> |
-        <a href='https://www.utdallas.edu/'>University of Texas at Dallas</a>
-    </div>
-    <div class='footer' style='color: #2e8b57; font-weight: bold;'>
-        Developed By: <a href='https://www.linkedin.com/in/sultan-alsarra-phd-56977a63/' target='_blank'>Sultan Alsarra</a>
-    </div>
-    """
-)
+""") as demo:
+
+    gr.Markdown("# [ConfliBERT-QA](https://eventdata.utdallas.edu/conflibert/)", elem_id="title")
+    gr.Markdown("Compare answers between ConfliBERT, BERT, and ChatGPT for English, and ConfliBERT, BETO, ConfliBERT-SQAC, Beto-SQAC, and ChatGPT for Spanish.")
+
+    language = gr.Dropdown(choices=["English", "Spanish"], label="Select Language")
+    context = gr.Textbox(lines=5, placeholder="Enter the context here...", label="Context")
+    question = gr.Textbox(lines=2, placeholder="Enter your question here...", label="Question")
+    output = gr.HTML(label="Output")
+
+    with gr.Row(elem_id="button-row"):
+        clear_btn = gr.Button("Clear")
+        submit_btn = gr.Button("Submit")
+
+    submit_btn.click(fn=compare_question_answering, inputs=[language, context, question], outputs=output)
+    clear_btn.click(fn=lambda: ("", "", "", ""), inputs=[], outputs=[language, context, question, output])
+
+    gr.Markdown("""
+    <div style="text-align: center; margin-top: 20px;">
+        Built by: <a href="https://www.linkedin.com/in/sultan-alsarra-phd-56977a63/" target="_blank">Sultan Alsarra</a>
+    </div>
+    """)
 
-# Launch the Gradio demo
 demo.launch(share=True)
 
 
 
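Because the key is read from the environment, a fail-fast guard before demo.launch would surface a missing key early; this guard is a suggestion, not part of the commit:

# Suggested addition (not in the commit): fail fast when the key is absent,
# since the ChatGPT paths would otherwise error only at request time.
if not openai.api_key:
    raise RuntimeError("OPENAI_API_KEY is not set; the ChatGPT comparisons will fail.")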