Spaces:
Sleeping
Sleeping
0.47 input_ids slice
Browse files
app.py
CHANGED
@@ -24,7 +24,7 @@ system_prompts = {
|
|
24 |
"Spanish": "Eres un chatbot servicial que responde a las entradas de los usuarios de forma concisa y original."
|
25 |
}
|
26 |
|
27 |
-
htmL_info = "<center><h1>Pharia Battle Royale</h1><p>Let the games begin: Try a prompt in a language you like. Set the parameters and vote for the best answers. After casting your vote, the bots reveal their identity.</p></center>"
|
28 |
|
29 |
model_info = [{"id": "Aleph-Alpha/Pharia-1-LLM-7B-control-hf",
|
30 |
"name": "Pharia 1 LLM 7B control hf"}]
|
@@ -109,7 +109,9 @@ def generate_both(system_prompt, input_text, chatbot_a, chatbot_b, max_new_token
|
|
109 |
|
110 |
if "Pharia" in model_info[0]['id']:
|
111 |
formatted_conversation = apply_pharia_template(messages=new_messages_a, add_generation_prompt=True)
|
112 |
-
|
|
|
|
|
113 |
else:
|
114 |
input_ids_a = tokenizer_a.apply_chat_template(
|
115 |
new_messages_a,
|
@@ -120,7 +122,9 @@ def generate_both(system_prompt, input_text, chatbot_a, chatbot_b, max_new_token
|
|
120 |
|
121 |
if "Pharia" in model_info[1]['id']:
|
122 |
formatted_conversation = apply_pharia_template(messages=new_messages_a, add_generation_prompt=True)
|
123 |
-
|
|
|
|
|
124 |
else:
|
125 |
input_ids_b = tokenizer_b.apply_chat_template(
|
126 |
new_messages_b,
|
|
|
24 |
"Spanish": "Eres un chatbot servicial que responde a las entradas de los usuarios de forma concisa y original."
|
25 |
}
|
26 |
|
27 |
+
htmL_info = "<center><h1>Pharia Battle Royale</h1><p>Let the games begin: In this bot arena, the Pharia 1 mode competes against a challenger. Try a prompt in a language you like. Set the parameters and vote for the best answers. After casting your vote, the bots reveal their identity.</p></center>"
|
28 |
|
29 |
model_info = [{"id": "Aleph-Alpha/Pharia-1-LLM-7B-control-hf",
|
30 |
"name": "Pharia 1 LLM 7B control hf"}]
|
|
|
109 |
|
110 |
if "Pharia" in model_info[0]['id']:
|
111 |
formatted_conversation = apply_pharia_template(messages=new_messages_a, add_generation_prompt=True)
|
112 |
+
tokenized = tokenizer_a(formatted_conversation, return_tensors="pt").to(device)
|
113 |
+
logging.debug(tokenized)
|
114 |
+
input_ids_a = tokenized.input_ids
|
115 |
else:
|
116 |
input_ids_a = tokenizer_a.apply_chat_template(
|
117 |
new_messages_a,
|
|
|
122 |
|
123 |
if "Pharia" in model_info[1]['id']:
|
124 |
formatted_conversation = apply_pharia_template(messages=new_messages_b, add_generation_prompt=True)
|
125 |
+
tokenized = tokenizer_b(formatted_conversation, return_tensors="pt").to(device)
|
126 |
+
logging.debug(tokenized)
|
127 |
+
input_ids_b = tokenized.input_ids
|
128 |
else:
|
129 |
input_ids_b = tokenizer_b.apply_chat_template(
|
130 |
new_messages_b,
|