peterpeter8585 commited on
Commit
bb25ce9
·
verified ·
1 Parent(s): c981802

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +76 -2
app.py CHANGED
@@ -19,7 +19,7 @@ import requests, os, json
19
  from requests.adapters import HTTPAdapter, Retry
20
  from typing import List, Optional, Dict, Any
21
 
22
-
23
  class GitHubModelLLM(LLM):
24
  """GitHub Models API๋ฅผ ์‚ฌ์šฉํ•˜๋Š” ChatOpenAI ๋Œ€์ฒด ํด๋ž˜์Šค"""
25
 
@@ -72,13 +72,87 @@ class GitHubModelLLM(LLM):
72
  res = self._post(body)
73
  content = res.get("choices", [{}])[0].get("message", {}).get("content", "")
74
  return content
75
-
76
  async def _acall(self, messages: List[Any], stop: Optional[List[str]] = None, **kwargs):
77
  return self._call(messages, stop, **kwargs)
 
78
  from typing import Optional, List, Dict, Any
79
  from langchain.llms.base import LLM
80
  import requests, os, json
81
  from requests.adapters import HTTPAdapter, Retry
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
82
  '''
83
  class GitHubModelLLM(LLM):
84
  """GitHub Models API ๊ธฐ๋ฐ˜ LangChain LLM (๋Œ€ํ™” ๋ฉ”๋ชจ๋ฆฌ ํ†ตํ•ฉ ์ง€์›)"""
 
19
  from requests.adapters import HTTPAdapter, Retry
20
  from typing import List, Optional, Dict, Any
21
 
22
+ '''
23
  class GitHubModelLLM(LLM):
24
  """GitHub Models API๋ฅผ ์‚ฌ์šฉํ•˜๋Š” ChatOpenAI ๋Œ€์ฒด ํด๋ž˜์Šค"""
25
 
 
72
  res = self._post(body)
73
  content = res.get("choices", [{}])[0].get("message", {}).get("content", "")
74
  return content
 
75
  async def _acall(self, messages: List[Any], stop: Optional[List[str]] = None, **kwargs):
76
  return self._call(messages, stop, **kwargs)
77
+ '''
78
  from typing import Optional, List, Dict, Any
79
  from langchain.llms.base import LLM
80
  import requests, os, json
81
  from requests.adapters import HTTPAdapter, Retry
82
+ from typing import Optional, List, Dict, Any
83
+ from langchain.llms.base import LLM
84
+ from langchain.schema import HumanMessage, AIMessage, SystemMessage
85
+ import requests, os, json
86
+ from requests.adapters import HTTPAdapter, Retry
87
+
88
class GitHubModelLLM(LLM):
    """LangChain LLM backed by the GitHub Models API (with conversation-memory support)."""

    model: str = "openai/gpt-4.1"
    endpoint: str = "https://models.github.ai/inference"
    # Resolved once at class-definition time; _post_chat additionally falls
    # back to GITHUB_TOKEN / token env vars at request time.
    token: Optional[str] = os.environ.get("token")
    system_prompt: Optional[str] = (
        "너는 PIXAL(Primary Interactive X-ternal Assistant with multi Language)이야. "
        "너의 개발자는 정성윤 이라는 6학년 파이썬 프로그래머야. "
        "이것은 시스템 메시지입니다. 참고 하십시오. "
        "이 메시지는 사용자가 보낸것이 아닙니다."
    )
    request_timeout: float = 30.0
    max_retries: int = 2
    backoff_factor: float = 0.3

    @property
    def _llm_type(self) -> str:
        return "github_models_api"

    def _post_chat(self, body: Dict[str, Any]) -> Dict[str, Any]:
        """POST *body* to the chat-completions endpoint and return the parsed JSON.

        Retries 429/5xx responses up to ``max_retries`` times with exponential
        backoff; raises ``requests.HTTPError`` on a final non-2xx response.
        """
        token = self.token or os.getenv("GITHUB_TOKEN") or os.getenv("token")
        # SECURITY FIX: a previous revision hard-coded a GitHub personal access
        # token ("github_pat_...") directly in this header, ignoring the
        # resolved `token` above. That credential is published in the repo
        # history and MUST be revoked. Always take the token from
        # configuration / environment — never commit secrets.
        with requests.Session() as session:
            retries = Retry(
                total=self.max_retries,
                backoff_factor=self.backoff_factor,
                status_forcelist=[429, 500, 502, 503, 504],
            )
            session.mount("https://", HTTPAdapter(max_retries=retries))
            session.headers.update({
                "Content-Type": "application/json",
                "Authorization": f"Bearer {token}",
            })
            resp = session.post(
                f"{self.endpoint}/chat/completions",
                json=body,
                timeout=self.request_timeout,
            )
            resp.raise_for_status()
            return resp.json()

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        **kwargs,
    ) -> str:
        """Call the model, including conversation memory (``chat_history``) if supplied.

        ``kwargs["memory"]`` may carry a LangChain memory object whose
        ``chat_memory.messages`` are replayed into the request before the
        current *prompt*.
        """
        messages: List[Dict[str, str]] = []

        # System prompt goes first so the model sees it before any history.
        if self.system_prompt:
            messages.append({"role": "system", "content": self.system_prompt})

        # Replay prior conversation turns from the optional memory object.
        memory = kwargs.get("memory")
        if memory and hasattr(memory, "chat_memory"):
            for msg in memory.chat_memory.messages:
                if isinstance(msg, HumanMessage):
                    messages.append({"role": "user", "content": msg.content})
                elif isinstance(msg, AIMessage):
                    messages.append({"role": "assistant", "content": msg.content})
                elif isinstance(msg, SystemMessage):
                    messages.append({"role": "system", "content": msg.content})

        # Current user input comes last.
        messages.append({"role": "user", "content": prompt})

        body: Dict[str, Any] = {"model": self.model, "messages": messages}
        if stop:
            body["stop"] = stop

        # Invoke the model; fall back to serializing a function_call payload
        # when the response carries no plain text content.
        res = self._post_chat(body)
        msg = res.get("choices", [{}])[0].get("message", {})
        return msg.get("content") or json.dumps(msg.get("function_call", {}))
156
  '''
157
  class GitHubModelLLM(LLM):
158
  """GitHub Models API ๊ธฐ๋ฐ˜ LangChain LLM (๋Œ€ํ™” ๋ฉ”๋ชจ๋ฆฌ ํ†ตํ•ฉ ์ง€์›)"""