Update app.py
app.py
CHANGED
@@ -19,7 +19,7 @@ import requests, os, json
 from requests.adapters import HTTPAdapter, Retry
 from typing import List, Optional, Dict, Any
 
-
+'''
 class GitHubModelLLM(LLM):
     """A ChatOpenAI replacement class that uses the GitHub Models API"""
 
@@ -72,13 +72,87 @@ class GitHubModelLLM(LLM):
         res = self._post(body)
         content = res.get("choices", [{}])[0].get("message", {}).get("content", "")
         return content
-
     async def _acall(self, messages: List[Any], stop: Optional[List[str]] = None, **kwargs):
         return self._call(messages, stop, **kwargs)
+'''
 from typing import Optional, List, Dict, Any
 from langchain.llms.base import LLM
 import requests, os, json
 from requests.adapters import HTTPAdapter, Retry
+from typing import Optional, List, Dict, Any
+from langchain.llms.base import LLM
+from langchain.schema import HumanMessage, AIMessage, SystemMessage
+import requests, os, json
+from requests.adapters import HTTPAdapter, Retry
+
+class GitHubModelLLM(LLM):
+    """LangChain LLM based on the GitHub Models API (with integrated conversation-memory support)"""
+    model: str = "openai/gpt-4.1"
+    endpoint: str = "https://models.github.ai/inference"
+    token: Optional[str] = os.environ.get("token")
+    system_prompt: Optional[str] = (
+        "You are PIXAL (Primary Interactive X-ternal Assistant with multi Language). "
+        "Your developer is 전성준, a sixth-grade Python programmer. "
+        "This is a system message; please take note of it. "
+        "This message was not sent by the user."
+    )
+    request_timeout: float = 30.0
+    max_retries: int = 2
+    backoff_factor: float = 0.3
+
+    @property
+    def _llm_type(self) -> str:
+        return "github_models_api"
+
+    def _post_chat(self, body: Dict[str, Any]) -> Dict[str, Any]:
+        # Resolve the API token from the instance or the environment
+        token = self.token or os.getenv("GITHUB_TOKEN") or os.getenv("token")
+        session = requests.Session()
+        retries = Retry(total=self.max_retries, backoff_factor=self.backoff_factor,
+                        status_forcelist=[429, 500, 502, 503, 504])
+        session.mount("https://", HTTPAdapter(max_retries=retries))
+        session.headers.update({
+            "Content-Type": "application/json",
+            "Authorization": f"Bearer {token}"  # use the resolved token rather than a hardcoded PAT
+        })
+        resp = session.post(f"{self.endpoint}/chat/completions", json=body, timeout=self.request_timeout)
+        resp.raise_for_status()
+        return resp.json()
+
+    def _call(
+        self,
+        prompt: str,
+        stop: Optional[List[str]] = None,
+        **kwargs
+    ) -> str:
+        """Call the model with conversation memory (chat_history) included"""
+        messages = []
+
+        # 🧠 Add the system prompt
+        if self.system_prompt:
+            messages.append({"role": "system", "content": self.system_prompt})
+
+        # 🧩 If there is memory content, insert the previous conversation
+        memory = kwargs.get("memory")
+        if memory and hasattr(memory, "chat_memory"):
+            for msg in memory.chat_memory.messages:
+                if isinstance(msg, HumanMessage):
+                    messages.append({"role": "user", "content": msg.content})
+                elif isinstance(msg, AIMessage):
+                    messages.append({"role": "assistant", "content": msg.content})
+                elif isinstance(msg, SystemMessage):
+                    messages.append({"role": "system", "content": msg.content})
+
+        # 👤 Add the user input
+        messages.append({"role": "user", "content": prompt})
+
+        body = {"model": self.model, "messages": messages}
+        if stop:
+            body["stop"] = stop
+
+        # 🚀 Call the model
+        res = self._post_chat(body)
+        msg = res.get("choices", [{}])[0].get("message", {})
+        return msg.get("content") or json.dumps(msg.get("function_call", {}))
 '''
 class GitHubModelLLM(LLM):
     """LangChain LLM based on the GitHub Models API (with integrated conversation-memory support)"""