Humanlearning committed on
Commit
0989d5f
·
1 Parent(s): 27d6c4f

Updated the temperature value and switched to a Groq model for faster execution

Browse files
__pycache__/code_agent.cpython-313.pyc CHANGED
Binary files a/__pycache__/code_agent.cpython-313.pyc and b/__pycache__/code_agent.cpython-313.pyc differ
 
__pycache__/langraph_agent.cpython-313.pyc CHANGED
Binary files a/__pycache__/langraph_agent.cpython-313.pyc and b/__pycache__/langraph_agent.cpython-313.pyc differ
 
code_agent.py CHANGED
@@ -34,7 +34,7 @@ from dataclasses import dataclass, replace
34
  from typing import Any, Optional
35
  import re # For stripping markdown fences
36
 
37
- from langchain_openai import ChatOpenAI
38
  from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
39
  from langchain_core.tools import tool
40
  from langgraph.graph import END, StateGraph
@@ -43,7 +43,7 @@ from langgraph.graph import END, StateGraph
43
  # 0. Global config
44
  ###############################################################################
45
 
46
- MODEL_NAME = os.getenv("LANGGRAPH_MODEL", "gpt-4o-mini")
47
  TIMEOUT_SEC = int(os.getenv("LANGGRAPH_TIMEOUT_SEC", "30"))
48
 
49
  ###############################################################################
@@ -78,7 +78,7 @@ def python_exec(code: str) -> str:
78
  # 2. LLM backend
79
  ###############################################################################
80
 
81
- llm = ChatOpenAI(model_name=MODEL_NAME, temperature=0.0)
82
 
83
  ###############################################################################
84
  # 3. Dataclass‑based state & LangGraph
 
34
  from typing import Any, Optional
35
  import re # For stripping markdown fences
36
 
37
+ from langchain_groq import ChatGroq
38
  from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
39
  from langchain_core.tools import tool
40
  from langgraph.graph import END, StateGraph
 
43
  # 0. Global config
44
  ###############################################################################
45
 
46
+ MODEL_NAME = os.getenv("LANGGRAPH_MODEL", "qwen-qwq-32b")
47
  TIMEOUT_SEC = int(os.getenv("LANGGRAPH_TIMEOUT_SEC", "30"))
48
 
49
  ###############################################################################
 
78
  # 2. LLM backend
79
  ###############################################################################
80
 
81
+ llm = ChatGroq(model=MODEL_NAME, temperature= 0.6)
82
 
83
  ###############################################################################
84
  # 3. Dataclass‑based state & LangGraph
langraph_agent.py CHANGED
@@ -215,7 +215,7 @@ def build_graph(provider: str = "groq"):
215
  llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash", temperature=0)
216
  elif provider == "groq":
217
  # Groq https://console.groq.com/docs/models
218
- llm = ChatGroq(model="qwen-qwq-32b", temperature=0) # optional : qwen-qwq-32b gemma2-9b-it
219
  elif provider == "huggingface":
220
  # TODO: Add huggingface endpoint
221
  llm = ChatHuggingFace(
 
215
  llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash", temperature=0)
216
  elif provider == "groq":
217
  # Groq https://console.groq.com/docs/models
218
+ llm = ChatGroq(model="qwen-qwq-32b", temperature= 0.6) # optional : qwen-qwq-32b gemma2-9b-it
219
  elif provider == "huggingface":
220
  # TODO: Add huggingface endpoint
221
  llm = ChatHuggingFace(