peterpeter8585 committed on
Commit
fb3ffb2
·
verified ·
1 Parent(s): 8ab7a09

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +61 -1
app.py CHANGED
@@ -17,7 +17,7 @@ from typing import Optional, List, Dict, Any
17
  from langchain.llms.base import LLM
18
  import requests, os, json
19
  from requests.adapters import HTTPAdapter, Retry
20
-
21
  class GitHubModelLLM(LLM):
22
  """GitHub Models API 기반 LangChain LLM (λŒ€ν™” λ©”λͺ¨λ¦¬ 톡합 지원)"""
23
  model: str = "openai/gpt-4.1"
@@ -77,6 +77,66 @@ class GitHubModelLLM(LLM):
77
  res = self._post_chat(body)
78
  msg = res.get("choices", [{}])[0].get("message", {})
79
  return msg.get("content") or json.dumps(msg.get("function_call", {}))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
80
  # ──────────────────────────────
81
  # βœ… LangChain 도ꡬ & μ—μ΄μ „νŠΈ ꡬ성
82
  # ──────────────────────────────
 
17
  from langchain.llms.base import LLM
18
  import requests, os, json
19
  from requests.adapters import HTTPAdapter, Retry
20
+ '''
21
  class GitHubModelLLM(LLM):
22
  """GitHub Models API 기반 LangChain LLM (λŒ€ν™” λ©”λͺ¨λ¦¬ 톡합 지원)"""
23
  model: str = "openai/gpt-4.1"
 
77
  res = self._post_chat(body)
78
  msg = res.get("choices", [{}])[0].get("message", {})
79
  return msg.get("content") or json.dumps(msg.get("function_call", {}))
80
+ '''
81
class GitHubModelLLM(LLM):
    """LangChain LLM backed by the GitHub Models chat-completions API.

    Builds the message list from the configured system prompt, any prior
    turns held in a LangChain memory object passed via ``kwargs``, and the
    current prompt, then POSTs it to ``{endpoint}/chat/completions`` and
    returns the assistant reply.
    """

    # Model identifier forwarded verbatim in the request body.
    model: str = "openai/gpt-4.1"
    # Base URL of the GitHub Models inference API.
    endpoint: str = "https://models.github.ai/inference"
    # Default token; re-resolved at request time from GITHUB_TOKEN / token env vars.
    token: Optional[str] = os.environ.get("token")
    system_prompt: Optional[str] = (
        "λ„ˆλŠ” PIXAL(Primary Interactive X-ternal Assistant with multi Language)이야. "
        "λ„ˆμ˜ κ°œλ°œμžλŠ” μ •μ„±μœ€ μ΄λΌλŠ” 6ν•™λ…„ 파이썬 ν”„λ‘œκ·Έλž˜λ¨Έμ•Ό."
    )
    # Per-request timeout (seconds) and retry policy for transient HTTP errors.
    request_timeout: float = 30.0
    max_retries: int = 2
    backoff_factor: float = 0.3

    @property
    def _llm_type(self) -> str:
        # LangChain identifier for this LLM implementation.
        return "github_models_api"

    def _post_chat(self, body: Dict[str, Any]) -> Dict[str, Any]:
        """POST *body* to the chat-completions endpoint and return the JSON reply.

        Retries on 429/5xx per the configured retry policy.

        Raises:
            ValueError: if no API token can be resolved from config or env.
            requests.HTTPError: on a non-2xx response after retries.
        """
        token = self.token or os.getenv("GITHUB_TOKEN") or os.getenv("token")
        if not token:
            raise ValueError("❌ GitHub token이 μ„€μ •λ˜μ§€ μ•Šμ•˜μŠ΅λ‹ˆλ‹€.")
        session = requests.Session()
        retries = Retry(
            total=self.max_retries,
            backoff_factor=self.backoff_factor,
            status_forcelist=[429, 500, 502, 503, 504],
        )
        session.mount("https://", HTTPAdapter(max_retries=retries))
        session.headers.update({
            "Content-Type": "application/json",
            # SECURITY FIX: authenticate with the resolved token instead of the
            # hard-coded personal access token previously committed here (the
            # resolved `token` was computed but never used). The leaked PAT must
            # also be revoked on GitHub — removing it from source is not enough.
            "Authorization": f"Bearer {token}",
        })
        resp = session.post(
            f"{self.endpoint}/chat/completions",
            json=body,
            timeout=self.request_timeout,
        )
        resp.raise_for_status()
        return resp.json()

    def _call(self, prompt: str, stop: Optional[List[str]] = None, **kwargs) -> str:
        """Send the full conversation (memory + current prompt) to the model.

        Args:
            prompt: Current user input.
            stop: Optional stop sequences forwarded to the API.
            **kwargs: May contain ``memory`` — a LangChain memory object whose
                ``chat_memory.messages`` are replayed as prior turns.

        Returns:
            The assistant message content, or a JSON-serialized
            ``function_call`` when the content field is empty.
        """
        memory = kwargs.get("memory")
        messages = []

        # 1) System prompt first, when configured.
        if self.system_prompt:
            messages.append({"role": "system", "content": self.system_prompt})

        # 2) Replay prior turns stored in the LangChain memory object.
        if memory and hasattr(memory, "chat_memory"):
            for msg in memory.chat_memory.messages:
                if hasattr(msg, "type") and msg.type == "human":
                    messages.append({"role": "user", "content": msg.content})
                elif hasattr(msg, "type") and msg.type == "ai":
                    messages.append({"role": "assistant", "content": msg.content})

        # 3) Current user input goes last.
        messages.append({"role": "user", "content": prompt})

        body = {"model": self.model, "messages": messages}
        if stop:
            body["stop"] = stop

        res = self._post_chat(body)
        msg = res.get("choices", [{}])[0].get("message", {})
        return msg.get("content") or json.dumps(msg.get("function_call", {}))
140
  # ──────────────────────────────
141
  # βœ… LangChain 도ꡬ & μ—μ΄μ „νŠΈ ꡬ성
142
  # ──────────────────────────────