MightyOctopus committed
Commit 721f11c · verified · 1 Parent(s): 4904d13

Create app.py

Files changed (1)
app.py +215 -0
app.py ADDED
@@ -0,0 +1,215 @@
+ import os, io, sys, subprocess, shutil
+ from dotenv import load_dotenv
+ from openai import OpenAI
+ from google import genai
+ import gradio as gr
+ from datetime import datetime
+ from placeholder_python_code import pi_1, pi_2
+ from css_elements import css_elements
+
+ ### Environment
+ load_dotenv(".env")
+ OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
+ ANTHROPIC_API_KEY = os.getenv("ANTHROPIC_API_KEY")
+ GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
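+ ### Expected .env layout (illustrative sketch; values are placeholders):
+ # OPENAI_API_KEY=sk-...
+ # ANTHROPIC_API_KEY=sk-ant-...
+ # GEMINI_API_KEY=...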
+
+ ### Initialize
+ openai_client = OpenAI(api_key=OPENAI_API_KEY)
+ gemini_client = genai.Client(api_key=GEMINI_API_KEY)
+ OPENAI_MODEL = "gpt-5-mini-2025-08-07"
+ GEMINI_MODEL = "gemini-1.5-flash"
+
+
+ system_message = """
+ You are an assistant that reimplements Python code in high-performance C++
+ for an M1 Mac.
+ """.strip()
+
+ ### join the parts with newlines -- a bare += of .strip()ed strings would glue sentences together
+ system_message += "\n" + """
+ Respond only with C++; use comments sparingly and do not provide any
+ explanation other than occasional comments.
+ """.strip()
+
+ system_message += "\n" + """
+ The C++ response needs to produce identical output in the
+ fastest possible time.
+ """.strip()
+
+ ### timestamp used to name generated .cpp files (no colons, for filename portability)
+ current_time = datetime.now().strftime("%Y%m%d_%H%M%S")
+
+ def user_prompt_for_python(python_code):
+     user_prompt = """
+ Rewrite this Python code in C++ with the fastest possible implementation
+ that produces identical output in the least time.
+ """.strip()
+     user_prompt += "\n" + """
+ Respond only with C++ code; do not explain your work beyond the code itself,
+ and do not include markdown fences such as ```cpp. Working code only!
+ """.strip()
+     user_prompt += "\n" + """
+ Pay attention to number types to ensure no int overflows. Remember to #include
+ all necessary C++ headers such as iomanip.
+ """.strip()
+     user_prompt += "\n\n" + python_code
+
+     return user_prompt
+
+ def messages_for_python(python):
+     return [
+         {"role": "system", "content": system_message},
+         {"role": "user", "content": user_prompt_for_python(python)}
+     ]
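+ ### Sketch of the resulting chat payload (content abbreviated):
+ # [{"role": "system", "content": "You are an assistant that reimplements..."},
+ #  {"role": "user", "content": "Rewrite this Python code in C++..."}]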
+
+ ### strip the ```cpp and ``` markdown fences before saving
+ ### (.cpp is the C++ source file extension)
+ def write_output(cpp: str):
+     code = cpp.replace("```cpp", "").replace("```", "")
+     with open(f"optimized-{current_time}.cpp", "w") as f:
+         f.write(code)
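+ ### Usage sketch: write_output("```cpp\nint main() {}\n```") saves
+ ### optimized-<current_time>.cpp containing only the bare C++ code.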
+
+ def convert_and_optimize_code_with_openai(python):
+     stream = openai_client.chat.completions.create(
+         model=OPENAI_MODEL,
+         messages=messages_for_python(python),
+         stream=True
+     )
+     for chunk in stream:
+         ### the final chunk's delta may carry no content, hence the `or ""`
+         fragment = chunk.choices[0].delta.content or ""
+         # print(fragment, end="", flush=True)
+         yield fragment
+
+ def convert_and_optimize_code_with_gemini(python):
+     user_prompt = user_prompt_for_python(python)
+
+     stream = gemini_client.models.generate_content_stream(
+         model=GEMINI_MODEL,
+         contents=user_prompt
+     )
+
+     for chunk in stream:
+         stream_response = getattr(chunk, "text", None)
+         if stream_response:
+             yield stream_response
+
+ ### OR THIS -- the Gemini stream yields objects rather than plain strings,
+ ### so the text attribute has to be read off each chunk
+ # for chunk in stream:
+ #     if chunk.text:
+ #         yield chunk.text
+
+
+ # convert_and_optimize_code_with_openai(pi_1)
+
+
+ ###============================= GRADIO UI ================================###
+
+ def stream_text_on_ui(model, pi):
+     """
+     :param model: The selected LLM used to convert the Python code to C++
+     :param pi: Input Python code string
+     :yield: The accumulated response so far, updated once per streamed chunk
+     """
+     response = ""
+     if model == "GPT-5":
+         stream_res = convert_and_optimize_code_with_openai(pi)
+     elif model == "Gemini":
+         stream_res = convert_and_optimize_code_with_gemini(pi)
+     else:
+         raise ValueError("Unknown model...")
+     ### accumulate the raw stream and strip fences from a copy; a blanket
+     ### replace of "cpp" would also mangle any real code containing it
+     for chunk in stream_res:
+         response += chunk
+         yield response.replace("```cpp", "").replace("```", "")
+
+ def run_python_code(code: str):
+     output = io.StringIO()
+     old_stdout = sys.stdout
+     try:
+         sys.stdout = output
+         ### for isolation, exec in a fresh globals dict posing as __main__
+         exec(code, {"__name__": "__main__"})
+         return output.getvalue()
+     except Exception as e:
+         return output.getvalue() + f"-- {e}"
+     finally:
+         sys.stdout = old_stdout
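+ ### Quick self-check (sketch):
+ # assert run_python_code("print(2 + 2)") == "4\n"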
+
+ ### subprocess is used to call external programs (clang++/g++) to build and run the C++
+ def run_cpp_code(code: str):
+     write_output(code)
+     try:
+         compiler = shutil.which("clang++") or shutil.which("g++")
+         if not compiler:
+             return "Error: No C++ compiler found in container."
+
+         ### 1. Compile the code (on clang, -mcpu=apple-m1 already selects a
+         ### matching -march/-mtune, so the explicit flags are largely redundant)
+         compile_cmd = [
+             compiler, "-Ofast", "-std=c++17",
+             "-march=armv8.5-a", "-mtune=apple-m1",
+             "-mcpu=apple-m1", "-o", "optimized",
+             f"optimized-{current_time}.cpp"
+         ]
+         subprocess.run(
+             compile_cmd, check=True, text=True, capture_output=True
+         )
+
+         ### 2. Run the compiled binary
+         run_cmd = ["./optimized"]
+         run_result = subprocess.run(
+             run_cmd, check=True, text=True, capture_output=True
+         )
+         return run_result.stdout
+     except subprocess.CalledProcessError as e:
+         return f"An error occurred:\n{e.stderr}"
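+ ### Shell equivalent of the compile-and-run steps above (sketch; <ts> stands
+ ### for the current_time stamp):
+ # clang++ -Ofast -std=c++17 -mcpu=apple-m1 -o optimized optimized-<ts>.cpp
+ # ./optimized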
+
+
+ with gr.Blocks(
+     css=css_elements,
+     title="Python To C++ Code Converter"
+ ) as ui:
+     with gr.Row():
+         pi_textbox = gr.Textbox(label="Place Python Code Here:", lines=20, value=pi_1)
+         cpp_output = gr.Textbox(label="C++ Code Converted:", lines=20)
+
+     with gr.Row():
+         model_selection = gr.Dropdown(
+             choices=["GPT-5", "Gemini"],
+             label="Select Model",
+             value="GPT-5",
+             interactive=True
+         )
+
+     with gr.Row():
+         convert_btn = gr.Button(value="Convert", size="lg")
+
+     with gr.Row():
+         run_py_btn = gr.Button(value="Run Python")
+         run_cpp_btn = gr.Button(value="Run C++")
+
+     with gr.Row():
+         python_out = gr.TextArea(label="Python Result:", elem_classes=["python"])
+         cpp_out = gr.TextArea(label="C++ Result:", elem_classes=["cpp"])
+
+     convert_btn.click(
+         fn=stream_text_on_ui,
+         inputs=[model_selection, pi_textbox],
+         outputs=cpp_output
+     )
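+     ### Because stream_text_on_ui is a generator, Gradio streams it: each
+     ### yielded string replaces the cpp_output textbox content, so the
+     ### conversion appears incrementally as the model responds.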
+
+     run_py_btn.click(
+         fn=run_python_code,
+         inputs=pi_textbox,
+         outputs=python_out
+     )
+     run_cpp_btn.click(
+         fn=run_cpp_code,
+         inputs=cpp_output,
+         outputs=cpp_out
+     )
+
+
+ port = int(os.getenv("PORT", 7860))
+ ui.launch(server_name="0.0.0.0", server_port=port)