Update app.py
app.py CHANGED
@@ -17,7 +17,7 @@ from typing import Optional, List, Dict, Any
 from langchain.llms.base import LLM
 import requests, os, json
 from requests.adapters import HTTPAdapter, Retry
-
+'''
 class GitHubModelLLM(LLM):
     """LangChain LLM backed by the GitHub Models API (with conversation-memory integration)"""
     model: str = "openai/gpt-4.1"
@@ -77,6 +77,66 @@ class GitHubModelLLM(LLM):
         res = self._post_chat(body)
         msg = res.get("choices", [{}])[0].get("message", {})
         return msg.get("content") or json.dumps(msg.get("function_call", {}))
+'''
+class GitHubModelLLM(LLM):
+    model: str = "openai/gpt-4.1"
+    endpoint: str = "https://models.github.ai/inference"
+    token: Optional[str] = os.environ.get("token")
+    system_prompt: Optional[str] = (
+        "You are PIXAL (Primary Interactive X-ternal Assistant with multi Language). "
+        "Your developer is 전성준, a sixth-grade Python programmer."
+    )
+    request_timeout: float = 30.0
+    max_retries: int = 2
+    backoff_factor: float = 0.3
+
+    @property
+    def _llm_type(self) -> str:
+        return "github_models_api"
+
+    def _post_chat(self, body: Dict[str, Any]) -> Dict[str, Any]:
+        token = self.token or os.getenv("GITHUB_TOKEN") or os.getenv("token")
+        if not token:
+            raise ValueError("❌ GitHub token is not set.")
+        session = requests.Session()
+        retries = Retry(total=self.max_retries, backoff_factor=self.backoff_factor,
+                        status_forcelist=[429, 500, 502, 503, 504])
+        session.mount("https://", HTTPAdapter(max_retries=retries))
+        session.headers.update({
+            "Content-Type": "application/json",
+            "Authorization": f"Bearer {token}"  # use the validated token, not a hard-coded secret
+        })
+        resp = session.post(f"{self.endpoint}/chat/completions", json=body, timeout=self.request_timeout)
+        resp.raise_for_status()
+        return resp.json()
+
+    def _call(self, prompt: str, stop: Optional[List[str]] = None, **kwargs) -> str:
+        """💬 Pass the full conversation from LangChain memory to the model"""
+        memory = kwargs.get("memory")
+        messages = []
+
+        # 1️⃣ System prompt
+        if self.system_prompt:
+            messages.append({"role": "system", "content": self.system_prompt})
+
+        # 2️⃣ Include earlier turns stored in memory
+        if memory and hasattr(memory, "chat_memory"):
+            for msg in memory.chat_memory.messages:
+                if hasattr(msg, "type") and msg.type == "human":
+                    messages.append({"role": "user", "content": msg.content})
+                elif hasattr(msg, "type") and msg.type == "ai":
+                    messages.append({"role": "assistant", "content": msg.content})
+
+        # 3️⃣ Current user input
+        messages.append({"role": "user", "content": prompt})
+
+        body = {"model": self.model, "messages": messages}
+        if stop:
+            body["stop"] = stop
+
+        res = self._post_chat(body)
+        msg = res.get("choices", [{}])[0].get("message", {})
+        return msg.get("content") or json.dumps(msg.get("function_call", {}))
 # ──────────────────────────────
 # ✅ LangChain tools & agent setup
 # ──────────────────────────────
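
Not part of the commit, but for orientation: a minimal usage sketch of the new class, assuming a GITHUB_TOKEN environment variable is set and classic LangChain's ConversationBufferMemory is available (the example prompts are invented):

    from langchain.memory import ConversationBufferMemory

    llm = GitHubModelLLM()
    memory = ConversationBufferMemory()

    # Record one earlier exchange so _call() has history to replay.
    memory.save_context({"input": "Hello, who are you?"}, {"output": "I'm PIXAL."})

    # _call() prepends the system prompt, replays the stored turns as
    # user/assistant messages, appends the new prompt, and posts the
    # whole list to the /chat/completions endpoint in one request.
    reply = llm._call("What did I just ask you?", memory=memory)
    print(reply)

Because the memory object arrives through kwargs rather than through a chain, _call() rebuilds the complete message list on every request, which is what gives the model the full conversation each time.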