Humanlearning committed on
Commit
27d6c4f
·
1 Parent(s): f79d6c4

+ added a coding agent

Browse files
__pycache__/code_agent.cpython-313.pyc ADDED
Binary file (7.93 kB). View file
 
__pycache__/langraph_agent.cpython-313.pyc CHANGED
Binary files a/__pycache__/langraph_agent.cpython-313.pyc and b/__pycache__/langraph_agent.cpython-313.pyc differ
 
code_agent.py ADDED
@@ -0,0 +1,215 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ LangGraph Code‑Interpreter Agent
3
+ ================================
4
+ A minimal, production‑ready example that wires a Python code‑execution tool into
5
+ a LangGraph workflow with an *LLM → plan → execute → reflect* loop.
6
+
7
+ Key changes (2025‑06‑20)
8
+ -----------------------
9
+ * **Whitelisted built‑ins** for safer `python_exec`.
10
+ * **Timeout guard** – aborts if the workflow exceeds a wall‑clock limit (default
11
+ 30 s, configurable via `LANGGRAPH_TIMEOUT_SEC`).
12
+ * **Dataclass state** – replaced untyped `Dict[str, Any]` with a typed
13
+ `@dataclass AgentState` for clearer intent and static‑analysis friendliness.
14
+
15
+ Dependencies
16
+ ------------
17
+ ```bash
18
+ pip install langgraph langchain openai tiktoken tenacity
19
+ ```
20
+
21
+ Set the environment variable `OPENAI_API_KEY` before running.
22
+ Optionally, you can swap `python_exec` with a sandboxed runner such as `e2b` or
23
+ `codeinterpreter-api`.
24
+ """
25
+ from __future__ import annotations
26
+
27
+ import contextlib
28
+ import io
29
+ import os
30
+ import textwrap
31
+ import time
32
+ import traceback
33
+ from dataclasses import dataclass, replace
34
+ from typing import Any, Optional
35
+ import re # For stripping markdown fences
36
+
37
+ from langchain_openai import ChatOpenAI
38
+ from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
39
+ from langchain_core.tools import tool
40
+ from langgraph.graph import END, StateGraph
41
+
42
###############################################################################
# 0. Global config
###############################################################################

# Chat model and wall-clock budget, both overridable via environment variables.
MODEL_NAME = os.getenv("LANGGRAPH_MODEL", "gpt-4o-mini")
TIMEOUT_SEC = int(os.getenv("LANGGRAPH_TIMEOUT_SEC", "30"))

###############################################################################
# 1. Code‑execution tool (whitelisted built‑ins)
###############################################################################

# Only these built-ins are visible to executed snippets; notably `__import__`
# is absent, so `import` statements inside user code raise at runtime.
ALLOWED_BUILTINS: dict[str, Any] = {
    fn.__name__: fn for fn in (print, range, len, abs, sum, min, max)
}
62
+
63
@tool
def python_exec(code: str) -> str:
    """Execute **Python** inside a restricted namespace and capture STDOUT.

    Returns the captured stdout, a success placeholder when nothing was
    printed, or an ``ERROR:``-prefixed traceback on any exception.

    NOTE(review): a builtins whitelist is not a real sandbox — `exec`'d code
    can still reach dangerous objects via attribute traversal. Swap in a true
    sandbox (e.g. e2b) for untrusted input.
    """
    code = textwrap.dedent(code)
    # Single shared namespace. With separate globals/locals dicts, functions
    # defined by the snippet land in locals while their bodies resolve names
    # against globals, so recursive/helper calls raise NameError.
    namespace: dict[str, Any] = {"__builtins__": ALLOWED_BUILTINS}
    stdout = io.StringIO()
    try:
        with contextlib.redirect_stdout(stdout):
            exec(code, namespace)  # noqa: S102
        return stdout.getvalue() or "Code executed successfully, no output."
    except Exception:
        return "ERROR:\n" + traceback.format_exc()
76
+
77
+ ###############################################################################
78
+ # 2. LLM backend
79
+ ###############################################################################
80
+
81
# Deterministic decoding (temperature=0) for reproducible code generation.
# `model` is the current parameter name; `model_name` is a deprecated alias.
llm = ChatOpenAI(model=MODEL_NAME, temperature=0.0)
82
+
83
+ ###############################################################################
84
+ # 3. Dataclass‑based state & LangGraph
85
+ ###############################################################################
86
+
87
@dataclass
class AgentState:
    """Typed state object carried through the graph."""

    input: str  # original user request
    start_time: float  # epoch seconds when the run began (for the timeout guard)
    code: Optional[str] = None  # latest generated Python snippet
    exec_result: Optional[str] = None  # captured stdout or "ERROR:\n..." transcript
    tries: int = 0  # repair attempts made so far
    done: bool = False  # terminal flag set by reflect_node
97
+
98
+
99
# State machine over the typed AgentState defined above.
graph = StateGraph(AgentState)
100
+
101
# 3‑A Plan node – write code

def plan_node(state: AgentState) -> AgentState:
    """Ask the LLM to write self-contained code answering ``state.input``."""
    prompt = [
        SystemMessage(
            content=(
                "You are an expert Python developer. Given a user request, "
                "write self‑contained Python code that prints ONLY the final "
                "answer via `print()`. Always avoid network calls."
            )
        ),
        HumanMessage(content=state.input),
    ]
    # `.invoke` replaces the deprecated `llm(prompt)` __call__ style.
    code_block = _extract_code(llm.invoke(prompt).content)
    return replace(state, code=code_block)
116
+
117
# 3‑B Execute node – run code

def exec_node(state: AgentState) -> AgentState:
    """Run the planned code in the sandbox and record its stdout/error."""
    # Use `.invoke`: calling a @tool-decorated function directly via
    # __call__ is deprecated in langchain-core.
    output = python_exec.invoke(state.code or "")
    return replace(state, exec_result=output)
122
+
123
# 3‑C Reflect node – repair on error (a single repair attempt, with a
# wall-clock timeout guard).

def reflect_node(state: AgentState) -> AgentState:
    """Ask the LLM to fix failing code, honouring the time/retry budget.

    Marks the state ``done`` (ending the run) when either the wall-clock
    budget is exhausted or the repair attempt has already been spent.
    """
    # Hard stop once the overall time budget is spent.
    if time.time() - state.start_time > TIMEOUT_SEC:
        return replace(
            state,
            done=True,
            exec_result=f"ERROR:\nTimeout: exceeded {TIMEOUT_SEC}s budget",
        )

    tries = state.tries + 1
    if tries >= 2:
        # Retry budget exhausted – surface the last error as the final result.
        return replace(state, done=True, tries=tries)

    prompt = [
        SystemMessage(
            content=(
                "You are an expert Python debugger. Your job is to fix the "
                "given code so it runs without errors and still answers the "
                "original question. Return ONLY the corrected code."
            )
        ),
        HumanMessage(content="Code:\n" + (state.code or "")),
        AIMessage(content="Error:\n" + (state.exec_result or "")),
    ]
    # `.invoke` replaces the deprecated `llm(prompt)` __call__ style.
    fixed_code = _extract_code(llm.invoke(prompt).content)
    return replace(state, code=fixed_code, tries=tries)
150
+
151
# 3‑D Wire nodes & conditional edges

graph.add_node("plan", plan_node)

graph.add_node("execute", exec_node)

graph.add_node("reflect", reflect_node)

# Every run starts by planning code for the user's request.
graph.set_entry_point("plan")

graph.add_edge("plan", "execute")
162
+
163
+
164
def needs_fix(state: AgentState) -> bool:
    """True when the last execution produced an error transcript."""
    transcript = state.exec_result
    return transcript is not None and transcript.startswith("ERROR")
166
+
167
# After execution: an "ERROR"-prefixed transcript routes to repair,
# anything else is a final answer.
graph.add_conditional_edges(
    "execute",
    needs_fix,
    {True: "reflect", False: END},
)
172
+
173
# After reflection, either run the fixed code again or terminate if `done`.

def should_continue(state: AgentState) -> bool:
    """Return True to stop, False to continue executing."""
    # `done` is set by reflect_node on timeout or retry exhaustion.
    return state.done is True
178
+
179
# Loop back to execution with the repaired code unless the state is done.
graph.add_conditional_edges(
    "reflect",
    should_continue,
    {True: END, False: "execute"},
)

# Compile the wired graph into a runnable agent.
agent = graph.compile()
186
+
187
+ ###############################################################################
188
+ # 4. Helper function & CLI entry‑point
189
+ ###############################################################################
190
+
191
def run_agent(query: str) -> str:
    """Run the agent end‑to‑end and return the printed answer (or error)."""
    initial = AgentState(input=query, start_time=time.time())
    outcome = agent.invoke(initial)
    # The compiled graph yields a dict-like AddableValuesDict, so we use
    # key access rather than attribute access.
    return outcome.get("exec_result", "No result")
198
+
199
+ # ---------------------------------------------------------------------------
200
+ # Helper to strip Markdown code fences (```python ... ```)
201
+ # ---------------------------------------------------------------------------
202
+
203
+ def _extract_code(text: str) -> str:
204
+ """Return the first code block in *text* or the raw text if none found."""
205
+ match = re.search(r"```(?:python|py)?\s*(.*?)```", text, flags=re.S | re.I)
206
+ return match.group(1).strip() if match else text.strip()
207
+
208
+ if __name__ == "__main__":
209
+ import sys
210
+
211
+ question = (
212
+ sys.argv[1] if len(sys.argv) > 1 else "What is the 10th Fibonacci number?"
213
+ )
214
+
215
+ print(run_agent(question))
langraph_agent.py CHANGED
@@ -16,6 +16,10 @@ from langchain_core.tools import tool
16
  from langchain.tools.retriever import create_retriever_tool
17
  from supabase.client import Client, create_client
18
  import requests # NEW: for HTTP requests to scoring API
 
 
 
 
19
 
20
  from langfuse.langchain import CallbackHandler
21
 
@@ -36,58 +40,6 @@ print(f"GROQ_API_KEY loaded: {bool(os.environ.get('GROQ_API_KEY'))}")
36
  # Base URL of the scoring API (duplicated here to avoid circular import with basic_agent)
37
  DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
38
 
39
- @tool
40
- def multiply(a: int, b: int) -> int:
41
- """Multiply two numbers.
42
-
43
- Args:
44
- a: first int
45
- b: second int
46
- """
47
- return a * b
48
-
49
- @tool
50
- def add(a: int, b: int) -> int:
51
- """Add two numbers.
52
-
53
- Args:
54
- a: first int
55
- b: second int
56
- """
57
- return a + b
58
-
59
- @tool
60
- def subtract(a: int, b: int) -> int:
61
- """Subtract two numbers.
62
-
63
- Args:
64
- a: first int
65
- b: second int
66
- """
67
- return a - b
68
-
69
- @tool
70
- def divide(a: int, b: int) -> int:
71
- """Divide two numbers.
72
-
73
- Args:
74
- a: first int
75
- b: second int
76
- """
77
- if b == 0:
78
- raise ValueError("Cannot divide by zero.")
79
- return a / b
80
-
81
- @tool
82
- def modulus(a: int, b: int) -> int:
83
- """Get the modulus of two numbers.
84
-
85
- Args:
86
- a: first int
87
- b: second int
88
- """
89
- return a % b
90
-
91
  @tool
92
  def wiki_search(input: str) -> str:
93
  """Search Wikipedia for a query and return maximum 2 results.
@@ -150,6 +102,17 @@ def arvix_search(input: str) -> str:
150
  print(f"Error in arvix_search: {e}")
151
  return {"arvix_results": f"Error searching Arxiv: {e}"}
152
 
 
 
 
 
 
 
 
 
 
 
 
153
  # load the system prompt from the file
154
  with open("system_prompt.txt", "r", encoding="utf-8") as f:
155
  system_prompt = f.read()
@@ -188,18 +151,61 @@ except Exception as e:
188
  create_retriever_tool = None
189
 
190
  tools = [
191
- multiply,
192
- add,
193
- subtract,
194
- divide,
195
- modulus,
196
  wiki_search,
197
  web_search,
198
  arvix_search,
 
199
  ]
200
  if create_retriever_tool:
201
  tools.append(create_retriever_tool)
202
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
203
  # Build graph function
204
  def build_graph(provider: str = "groq"):
205
  """Build the graph"""
@@ -233,7 +239,6 @@ def build_graph(provider: str = "groq"):
233
  return {"messages": [result]}
234
  except Exception as e:
235
  print(f"Error in assistant node: {e}")
236
- from langchain_core.messages import AIMessage
237
  error_msg = AIMessage(content=f"I encountered an error: {e}")
238
  return {"messages": [error_msg]}
239
 
@@ -313,8 +318,21 @@ def build_graph(provider: str = "groq"):
313
  builder.add_node("retriever", retriever)
314
  builder.add_node("assistant", assistant)
315
  builder.add_node("tools", ToolNode(tools))
 
 
 
316
  builder.add_edge(START, "retriever")
317
- builder.add_edge("retriever", "assistant")
 
 
 
 
 
 
 
 
 
 
318
  builder.add_conditional_edges(
319
  "assistant",
320
  tools_condition,
 
16
  from langchain.tools.retriever import create_retriever_tool
17
  from supabase.client import Client, create_client
18
  import requests # NEW: for HTTP requests to scoring API
19
+ from dataclasses import dataclass
20
+ import time # For timestamp in code agent wrapper
21
+ from code_agent import run_agent # Compiled code-interpreter graph helper
22
+ from langchain_core.messages import AIMessage
23
 
24
  from langfuse.langchain import CallbackHandler
25
 
 
40
  # Base URL of the scoring API (duplicated here to avoid circular import with basic_agent)
41
  DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
42
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
43
  @tool
44
  def wiki_search(input: str) -> str:
45
  """Search Wikipedia for a query and return maximum 2 results.
 
102
  print(f"Error in arvix_search: {e}")
103
  return {"arvix_results": f"Error searching Arxiv: {e}"}
104
 
105
@tool
def run_python(input: str) -> str:
    """Execute Python code in a restricted sandbox (code-interpreter).

    Pass **any** coding or file-manipulation task here and the agent will
    compute the answer by running Python. The entire standard library is NOT
    available; heavy networking is disabled. Suitable for: math, data-frames,
    small file parsing, algorithmic questions.
    """
    # Delegates to the code_agent LangGraph (plan → execute → reflect loop);
    # returns that graph's stdout transcript (or an "ERROR:" message).
    return run_agent(input)
115
+
116
  # load the system prompt from the file
117
  with open("system_prompt.txt", "r", encoding="utf-8") as f:
118
  system_prompt = f.read()
 
151
  create_retriever_tool = None
152
 
153
  tools = [
 
 
 
 
 
154
  wiki_search,
155
  web_search,
156
  arvix_search,
157
+ run_python,
158
  ]
159
  if create_retriever_tool:
160
  tools.append(create_retriever_tool)
161
 
162
+ # ---------------------------------------------------------------------------
163
+ # Code-interpreter integration helpers
164
+ # ---------------------------------------------------------------------------
165
+
166
+ from code_agent import run_agent # Executes the compiled code-interpreter graph
167
+
168
+
169
+ def _needs_code(state: dict) -> bool: # type: ignore[override]
170
+ """Heuristic: does *state* look like a coding request?"""
171
+ messages = state.get("messages", [])
172
+ if not messages:
173
+ return False
174
+ last_content = messages[-1].content.lower()
175
+ triggers = [
176
+ "```python",
177
+ "write python",
178
+ "run this code",
179
+ "file manipulation",
180
+ "csv",
181
+ "pandas",
182
+ "json",
183
+ "plot",
184
+ "fibonacci",
185
+ ]
186
+ return any(t in last_content for t in triggers)
187
+
188
+
189
def _code_exec_wrapper(state: dict):  # type: ignore[override]
    """Delegate the user query to the sandboxed Python interpreter."""
    # Collect every human turn so the interpreter sees the full context
    # (question plus any attachments), joined by blank lines.
    parts = [msg.content for msg in state.get("messages", []) if msg.type == "human"]
    combined = "\n\n".join(parts)
    # Stash the raw stdout; a downstream node converts it to an AI message.
    return {"code_result": run_agent(combined)}
199
+
200
+
201
def _code_to_message(state: dict):  # type: ignore[override]
    """Turn the interpreter's stdout into an AIMessage so the LLM can see it."""
    from langchain_core.messages import AIMessage  # local import to avoid cycles

    raw = state.get("code_result")
    # Nothing (or an empty transcript) to surface — leave the state untouched.
    if not raw:
        return {}
    return {"messages": [AIMessage(content=raw)]}
208
+
209
  # Build graph function
210
  def build_graph(provider: str = "groq"):
211
  """Build the graph"""
 
239
  return {"messages": [result]}
240
  except Exception as e:
241
  print(f"Error in assistant node: {e}")
 
242
  error_msg = AIMessage(content=f"I encountered an error: {e}")
243
  return {"messages": [error_msg]}
244
 
 
318
  builder.add_node("retriever", retriever)
319
  builder.add_node("assistant", assistant)
320
  builder.add_node("tools", ToolNode(tools))
321
+ builder.add_node("code_exec", _code_exec_wrapper)
322
+ builder.add_node("code_to_message", _code_to_message)
323
+
324
  builder.add_edge(START, "retriever")
325
+ # Conditional branch: decide whether to run code interpreter
326
+ builder.add_conditional_edges(
327
+ "retriever",
328
+ _needs_code,
329
+ {True: "code_exec", False: "assistant"},
330
+ )
331
+
332
+ # Flow after code execution: inject result then resume chat
333
+ builder.add_edge("code_exec", "code_to_message")
334
+ builder.add_edge("code_to_message", "assistant")
335
+
336
  builder.add_conditional_edges(
337
  "assistant",
338
  tools_condition,
pyproject.toml CHANGED
@@ -16,6 +16,7 @@ dependencies = [
16
  "langchain-google-genai>=2.1.5",
17
  "langchain-groq>=0.3.2",
18
  "langchain-huggingface>=0.3.0",
 
19
  "langfuse>=3.0.0",
20
  "langgraph>=0.4.8",
21
  "llama-index>=0.12.40",
 
16
  "langchain-google-genai>=2.1.5",
17
  "langchain-groq>=0.3.2",
18
  "langchain-huggingface>=0.3.0",
19
+ "langchain-openai>=0.3.24",
20
  "langfuse>=3.0.0",
21
  "langgraph>=0.4.8",
22
  "llama-index>=0.12.40",
uv.lock CHANGED
@@ -501,6 +501,7 @@ dependencies = [
501
  { name = "langchain-google-genai" },
502
  { name = "langchain-groq" },
503
  { name = "langchain-huggingface" },
 
504
  { name = "langfuse" },
505
  { name = "langgraph" },
506
  { name = "llama-index" },
@@ -529,6 +530,7 @@ requires-dist = [
529
  { name = "langchain-google-genai", specifier = ">=2.1.5" },
530
  { name = "langchain-groq", specifier = ">=0.3.2" },
531
  { name = "langchain-huggingface", specifier = ">=0.3.0" },
 
532
  { name = "langfuse", specifier = ">=3.0.0" },
533
  { name = "langgraph", specifier = ">=0.4.8" },
534
  { name = "llama-index", specifier = ">=0.12.40" },
@@ -1279,6 +1281,20 @@ wheels = [
1279
  { url = "https://files.pythonhosted.org/packages/c1/da/7446c2eeacd420cb975ceb49c6feca7be40cf8ed3686a128ca78410c148f/langchain_huggingface-0.3.0-py3-none-any.whl", hash = "sha256:aab85d57e649c805d2f2a9f8d72d87b5d12c45dd4831309ac9c37753ddb237ed", size = 27359 },
1280
  ]
1281
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1282
  [[package]]
1283
  name = "langchain-text-splitters"
1284
  version = "0.3.8"
@@ -2018,7 +2034,7 @@ wheels = [
2018
 
2019
  [[package]]
2020
  name = "openai"
2021
- version = "1.84.0"
2022
  source = { registry = "https://pypi.org/simple" }
2023
  dependencies = [
2024
  { name = "anyio" },
@@ -2030,9 +2046,9 @@ dependencies = [
2030
  { name = "tqdm" },
2031
  { name = "typing-extensions" },
2032
  ]
2033
- sdist = { url = "https://files.pythonhosted.org/packages/91/a3/128caf24e116f48fad3e4d5122cdf84db06c5127911849d51663c66158c8/openai-1.84.0.tar.gz", hash = "sha256:4caa43bdab262cc75680ce1a2322cfc01626204074f7e8d9939ab372acf61698", size = 467066 }
2034
  wheels = [
2035
- { url = "https://files.pythonhosted.org/packages/2a/10/f245db006a860dbc1f2e2c8382e0a1762c7753e7971ba43a1dc3f3ec1404/openai-1.84.0-py3-none-any.whl", hash = "sha256:7ec4436c3c933d68dc0f5a0cef0cb3dbc0864a54d62bddaf2ed5f3d521844711", size = 725512 },
2036
  ]
2037
 
2038
  [[package]]
 
501
  { name = "langchain-google-genai" },
502
  { name = "langchain-groq" },
503
  { name = "langchain-huggingface" },
504
+ { name = "langchain-openai" },
505
  { name = "langfuse" },
506
  { name = "langgraph" },
507
  { name = "llama-index" },
 
530
  { name = "langchain-google-genai", specifier = ">=2.1.5" },
531
  { name = "langchain-groq", specifier = ">=0.3.2" },
532
  { name = "langchain-huggingface", specifier = ">=0.3.0" },
533
+ { name = "langchain-openai", specifier = ">=0.3.24" },
534
  { name = "langfuse", specifier = ">=3.0.0" },
535
  { name = "langgraph", specifier = ">=0.4.8" },
536
  { name = "llama-index", specifier = ">=0.12.40" },
 
1281
  { url = "https://files.pythonhosted.org/packages/c1/da/7446c2eeacd420cb975ceb49c6feca7be40cf8ed3686a128ca78410c148f/langchain_huggingface-0.3.0-py3-none-any.whl", hash = "sha256:aab85d57e649c805d2f2a9f8d72d87b5d12c45dd4831309ac9c37753ddb237ed", size = 27359 },
1282
  ]
1283
 
1284
+ [[package]]
1285
+ name = "langchain-openai"
1286
+ version = "0.3.24"
1287
+ source = { registry = "https://pypi.org/simple" }
1288
+ dependencies = [
1289
+ { name = "langchain-core" },
1290
+ { name = "openai" },
1291
+ { name = "tiktoken" },
1292
+ ]
1293
+ sdist = { url = "https://files.pythonhosted.org/packages/da/e1/7be9384c5cb6fd6d0466ac6e781e44c3d80081c624faa7a9d2f8bf3a59ba/langchain_openai-0.3.24.tar.gz", hash = "sha256:cec1ab4ce7a8680af1eb11427b4384d2ceb46e9b20ff3f7beb0b0d83cab61a97", size = 687773 }
1294
+ wheels = [
1295
+ { url = "https://files.pythonhosted.org/packages/fc/9b/b8f86d78dbc651decd684ab938a1340e1ad3ba1dbcef805e12db65dee0ba/langchain_openai-0.3.24-py3-none-any.whl", hash = "sha256:3db7bb2964f86636276a8f4bbed4514daf13865b80896e547ff7ea13ce98e593", size = 68950 },
1296
+ ]
1297
+
1298
  [[package]]
1299
  name = "langchain-text-splitters"
1300
  version = "0.3.8"
 
2034
 
2035
  [[package]]
2036
  name = "openai"
2037
+ version = "1.88.0"
2038
  source = { registry = "https://pypi.org/simple" }
2039
  dependencies = [
2040
  { name = "anyio" },
 
2046
  { name = "tqdm" },
2047
  { name = "typing-extensions" },
2048
  ]
2049
+ sdist = { url = "https://files.pythonhosted.org/packages/5a/ea/bbeef604d1fe0f7e9111745bb8a81362973a95713b28855beb9a9832ab12/openai-1.88.0.tar.gz", hash = "sha256:122d35e42998255cf1fc84560f6ee49a844e65c054cd05d3e42fda506b832bb1", size = 470963 }
2050
  wheels = [
2051
+ { url = "https://files.pythonhosted.org/packages/f4/03/ef68d77a38dd383cbed7fc898857d394d5a8b0520a35f054e7fe05dc3ac1/openai-1.88.0-py3-none-any.whl", hash = "sha256:7edd7826b3b83f5846562a6f310f040c79576278bf8e3687b30ba05bb5dff978", size = 734293 },
2052
  ]
2053
 
2054
  [[package]]