import json
import os
import re

import plotly.graph_objs as go
import requests
from openai import OpenAI

# Read the key from the environment; never hardcode API keys in source
OPENROUTER_API_KEY = os.getenv("OPENROUTER_API_KEY")

or_client = OpenAI(api_key=OPENROUTER_API_KEY, base_url="https://openrouter.ai/api/v1")


def chat_with_llama(messages, model="meta-llama/llama-3-70b-instruct:nitro"):
    """Send the chat history to the model via OpenRouter and return its reply text."""
    response = or_client.chat.completions.create(
        model=model,
        messages=messages,
        max_tokens=4096,
    )
    response_message = response.choices[0].message.content
    print(response_message, "\n\n\n")
    return response_message
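
# Illustrative usage (a sketch; assumes OPENROUTER_API_KEY is set in the environment):
# reply = chat_with_llama([{"role": "user", "content": "Say hello."}])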

SysPrompt = """
You are a helpful expert AI assistant capable of executing Python code in an interactive Jupyter environment. Use the provided tools as needed to complete the given task. Use a markdown step to add a detailed explanation, in markdown format, before each code step. Use Plotly as the default charting library unless another is specified. Use IPython.display to render HTML and other compatible files.

Output only in the following XML format to perform any of the steps:

## Output Format:
<execute_python>python code here</execute_python>
<markdown>any explanations in markdown format</markdown>

These are the preinstalled libraries in the current environment:
pandas
matplotlib
plotly
yfinance
numpy
seaborn
scikit-learn
statsmodels
geopandas
geopy
folium
IPython
"""


# Send code to the remote execution service and return its output cells
def execute_code(code):
    headers = {"accept": "application/json", "Content-Type": "application/json"}
    data = {"session_token": "", "code": code}
    response = requests.post(
        "https://pvanand-code-execution.hf.space/execute",
        headers=headers,
        data=json.dumps(data),
    )
    if response.status_code == 200:
        result = response.json()
        if result["status"] == "success":
            # Code execution returned results
            output = result["value"]
        else:
            # Code execution failed; wrap the message in an error cell
            output = [
                {
                    "error": {
                        "ename": "Execution request failed",
                        "evalue": result["value"],
                        "traceback": [],
                    }
                }
            ]
            print(result["value"])
    else:
        output = []
    return output
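
# Illustrative call (a sketch; assumes the execution endpoint above is reachable).
# Each returned item is a notebook-style output cell dict with "text", "data",
# or "error" keys, as handled in process_execution_output below:
# cells = execute_code("print('hello world')")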


# Parse <tag>...</tag> steps out of the model response
def extract_steps(text):
    steps = []
    pattern = re.compile(r"<(\w+)>(.*?)</\1>", re.DOTALL)
    matches = pattern.findall(text)
    for tag, content in matches:
        if tag == "execute_python":
            # Strip any markdown code fences the model may have added
            content = re.sub(r"```python|```", "", content).strip()
        steps.append({"type": tag, "content": content.strip()})
    return steps
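
# Example (illustrative) of parsing a response in the expected XML format:
# >>> extract_steps("<markdown>Plot a line</markdown>"
# ...               "<execute_python>```python\nprint(1)\n```</execute_python>")
# [{'type': 'markdown', 'content': 'Plot a line'},
#  {'type': 'execute_python', 'content': 'print(1)'}]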


def execute_llm_code(code):
    try:
        output = execute_code(code)
    except Exception as e:
        # st.error("Exception occurred: " + str(e))
        output = None
    return output


def call_llm(history, model="meta-llama/llama-3-70b-instruct:nitro"):
    # Call the LLM, then execute any python steps it returned
    if history[0]["role"] != "system":
        history.insert(0, {"role": "system", "content": SysPrompt})
    response = chat_with_llama(history, model=model)
    llm_steps = extract_steps(response)
    result = []
    python_code = []
    if llm_steps:
        for step in llm_steps:
            if step["type"] == "execute_python":
                python_code.append(step["content"])
                output = execute_llm_code(code=step["content"])
                if output is not None:
                    clear_output = process_execution_output(execution_output=output)
                    result += clear_output
            else:
                result.append({"type": "text", "content": str(step["content"])})
        return (result, response, python_code)
    else:
        return [response], response, python_code
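
# Illustrative usage (hypothetical task; needs both the OpenRouter key and the
# execution service to be available):
# history = [{"role": "user", "content": "Plot y = x**2 for x in 0..10."}]
# outputs, raw_response, code_blocks = call_llm(history)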


def process_execution_output(execution_output):
    OUTPUT = []
    # st.write(output)
    if isinstance(execution_output, str):
        pass
        # Code Execution Output Only
    else:
        for item in execution_output:
            if "text" in item:
                # Skip noisy warnings and known junk lines
                exclude_list = [
                    "%%",
                    "NoneType",
                    "YFTzMissingError",
                    "Failed download",
                    "FutureWarning",
                ]
                if not any(x in str(item["text"]) for x in exclude_list):
                    OUTPUT.append({"type": "text", "content": item["text"]})
            elif "data" in item:
                if "image/png" in item["data"]:
                    OUTPUT.append(
                        {"type": "image", "content": item["data"]["image/png"]}
                    )
                elif "application/vnd.plotly.v1+json" in item["data"]:
                    plotly_data = item["data"]["application/vnd.plotly.v1+json"]
                    if isinstance(plotly_data, str):
                        plotly_data = json.loads(plotly_data)
                    go_json = str(go.Figure(plotly_data).to_json())
                    OUTPUT.append({"type": "plotly", "content": go_json})
                elif "folium.folium.Map" in item["data"].get("text/plain", ""):
                    # Folium maps arrive with a repr like "<folium.folium.Map at 0x...>"
                    # in text/plain; match on the class name, not a memory address
                    OUTPUT.append(
                        {"type": "FoliumMap", "content": item["data"]["text/html"]}
                    )
                # None of the above and not a script-only payload: render as HTML
                elif "text/html" in item["data"]:
                    script_tag_only = (
                        item["data"]["text/html"].strip()[:7] == "<script"
                    )  # TODO: check the full script tag
                    if not script_tag_only:
                        # st.html(item["data"]["text/html"])
                        OUTPUT.append(
                            {"type": "HTML", "content": item["data"]["text/html"]}
                        )
            elif "error" in item:
                pass
                # st.error(f"Error: {item['error']['ename']} - {item['error']['evalue']}")
    return OUTPUT
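

if __name__ == "__main__":
    # Minimal end-to-end smoke test (a sketch: it makes real network calls, so it
    # assumes OPENROUTER_API_KEY is set and the execution service is reachable;
    # the task string is a hypothetical example)
    demo_history = [{"role": "user", "content": "Print the first 5 square numbers."}]
    results, raw_response, code_blocks = call_llm(demo_history)
    for block in results:
        print(block)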