Update app.py
app.py
CHANGED
@@ -2,28 +2,30 @@ import os
import tempfile
import subprocess
import streamlit as st
-from transformers import AutoTokenizer
from huggingface_hub import InferenceClient

-
+# Configuración de la página
+st.set_page_config(page_title="Python to C++17 Translator", page_icon="⚙️", layout="wide")

+# Inicializar estado del chat
if "messages" not in st.session_state:
    st.session_state.messages = []

+# Sidebar para configuración
with st.sidebar:
-    st.header("
+    st.header("Configuración del Modelo")
    selected_model = st.selectbox(
        "Selecciona el modelo de Hugging Face:",
        [
+            "Qwen/Qwen2.5-Coder-32B-Instruct",  # Actualmente el mejor modelo gratuito para código
            "Qwen/CodeQwen1.5-7B-Chat",
-            "meta-llama/CodeLlama-13b-Instruct-hf"
-            "bigcode/starcoder2-15b"
+            "meta-llama/CodeLlama-13b-Instruct-hf"
        ],
        index=0
    )

    st.markdown("---")
-    st.markdown("### Entorno de
+    st.markdown("### Entorno de Ejecución")
    st.code("g++ -O3 -std=c++17", language="bash")

    if st.button("Limpiar Historial"):

@@ -40,7 +42,7 @@ def get_system_prompt() -> str:
    )

def extract_cpp_code(response_text: str) -> str:
-    """Extrae el
+    """Extrae el código C++ limpiando los bloques de markdown."""
    if "```cpp" in response_text:
        code = response_text.split("```cpp")[1].split("```")[0]
    elif "```c++" in response_text:

@@ -52,7 +54,7 @@ def extract_cpp_code(response_text: str) -> str:
    return code.strip()

def compile_and_run_cpp(cpp_code: str) -> str:
-    """Compila y ejecuta el
+    """Compila y ejecuta el código C++ en el entorno Linux de HF Spaces."""
    with tempfile.TemporaryDirectory() as tmpdir:
        cpp_file = os.path.join(tmpdir, "optimized.cpp")
        exe_file = os.path.join(tmpdir, "optimized_bin")

@@ -60,86 +62,78 @@ def compile_and_run_cpp(cpp_code: str) -> str:
        with open(cpp_file, "w") as f:
            f.write(cpp_code)

-        # Compilacion estricta con C++17 (HF Spaces usa Debian/Ubuntu con g++ disponible)
        compile_cmd = ["g++", "-O3", "-std=c++17", cpp_file, "-o", exe_file]

        try:
            # Compilar
            comp_process = subprocess.run(compile_cmd, capture_output=True, text=True, check=True)
-
            # Ejecutar
            run_process = subprocess.run([exe_file], capture_output=True, text=True, timeout=10)

            output = run_process.stdout
            if run_process.stderr:
                output += f"\nWarnings/Errors:\n{run_process.stderr}"
-            return output if output else "
+            return output if output else "Ejecución completada sin salida (stdout vacío)."

        except subprocess.CalledProcessError as e:
-            return f"Error de
+            return f"⚠️ Error de Compilación:\n{e.stderr}"
        except subprocess.TimeoutExpired:
-            return "Timeout: La
+            return "⏳ Timeout: La ejecución excedió los 10 segundos."

-st.title("Python ➔ C++ Auto-Translator")
-st.markdown("Pega tu
+st.title("🐍 Python ➔ ⚙️ C++17 Auto-Translator")
+st.markdown("Pega tu código de Python en el chat. El sistema generará la traducción a C++17 y la ejecutará automáticamente.")

+# Renderizar el historial de mensajes
for msg in st.session_state.messages:
    with st.chat_message(msg["role"]):
        if msg["role"] == "user":
            st.code(msg["content"], language="python")
        else:
            st.code(msg["content"]["code"], language="cpp")
-            with st.expander("Ver salida de
+            with st.expander("Ver salida de ejecución"):
                st.text(msg["content"]["output"])

-
-
+# Manejo de nuevo input en el chat
+if prompt := st.chat_input("Escribe o pega tu código Python aquí..."):
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.code(prompt, language="python")

-    # Preparar el cliente de Hugging Face
    hf_token = os.getenv("HF_TOKEN")
+    if not hf_token:
+        st.error("⚠️ ERROR CRÍTICO: No se encontró el HF_TOKEN. Ve a Settings -> Variables and secrets y configúralo.")
+        st.stop()
+
    client = InferenceClient(model=selected_model, token=hf_token)
-    tokenizer = AutoTokenizer.from_pretrained(selected_model)

    messages_for_hf = [
        {"role": "system", "content": get_system_prompt()},
        {"role": "user", "content": f"Translate this Python code to C++:\n\n{prompt}"}
    ]

-    # Convertir al formato de chat específico del modelo
-    try:
-        formatted_prompt = tokenizer.apply_chat_template(messages_for_hf, tokenize=False, add_generation_prompt=True)
-    except Exception:
-        # Fallback por si el tokenizador no soporta apply_chat_template directamente
-        formatted_prompt = f"{get_system_prompt()}\n\nUser: Translate this Python code to C++:\n{prompt}\n\nAssistant:"
-
-    # Generar respuesta del asistente
    with st.chat_message("assistant"):
-        with st.spinner("
+        with st.spinner("Conectando con la API y traduciendo a C++17..."):
            try:
-                #
-                response = client.
-
-
-                    temperature=0.1
-                    return_full_text=False
+                # Usamos la nueva API de chat (más estable, no requiere Tokenizer local)
+                response = client.chat_completion(
+                    messages=messages_for_hf,
+                    max_tokens=3000,
+                    temperature=0.1
                )

-
+                generated_text = response.choices[0].message.content
+                clean_cpp = extract_cpp_code(generated_text)
                st.code(clean_cpp, language="cpp")

                st.markdown("---")
-                st.markdown("**Ejecutando binario...**")
+                st.markdown("⚡ **Ejecutando binario...**")

                with st.spinner("Compilando y ejecutando..."):
                    execution_output = compile_and_run_cpp(clean_cpp)

-                with st.expander("Salida de
+                with st.expander("Salida de ejecución (stdout)", expanded=True):
                    st.text(execution_output)

-                # Guardar en el historial
                st.session_state.messages.append({
                    "role": "assistant",
                    "content": {

@@ -149,4 +143,4 @@ if prompt := st.chat_input("Escribe o pega tu codigo Python aqui..."):
                })

            except Exception as e:
-                st.error(f"Error
+                st.error(f"Error detallado de la API: {type(e).__name__} - {str(e)}")