# app_pixal_chat.py
import os, re, json, gradio as gr
from langchain.memory import ConversationBufferMemory
from langchain.agents import initialize_agent, AgentType, load_tools
from langchain_community.tools import Tool
from langchain_experimental.tools.python.tool import PythonREPLTool
from langchain_community.retrievers import WikipediaRetriever
import datetime
#from langchain.schema import HumanMessage,AIMessage,SystemMessage
#from langchain_community.tools.youtube.search import YouTubeSearchTool as YTS
# ──────────────────────────────
# βœ… GitHubModelLLM (kept as-is)
# ──────────────────────────────
from langchain.chat_models import ChatOpenAI
from langchain.schema import AIMessage, HumanMessage, SystemMessage
import requests
from requests.adapters import HTTPAdapter, Retry
from typing import List, Optional, Dict, Any
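
# All live GitHub Models calls below authenticate with a personal access
# token. A minimal environment sketch (assumption: the token is supplied via
# GITHUB_TOKEN or the legacy "token" variable rather than hardcoded):
#
#   export GITHUB_TOKEN=github_pat_xxxxxxxx
#   python app_pixal_chat.py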

'''
class GitHubModelLLM(LLM):
    """GitHub Models APIλ₯Ό μ‚¬μš©ν•˜λŠ” ChatOpenAI λŒ€μ²΄ 클래슀"""

    model_name: str = "openai/gpt-4.1"
    endpoint: str = "https://models.github.ai/inference"
    token: Optional[str] = os.environ.get("token")
    request_timeout: float = 30.0
    max_retries: int = 2
    backoff_factor: float = 0.3
    system_prompt: Optional[str] ="λ„ˆλŠ” PIXAL(Primary Interactive X-ternal Assistant with multi Language)이야. λ„ˆμ˜ κ°œλ°œμžλŠ” μ •μ„±μœ€ μ΄λΌλŠ” 6ν•™λ…„ 파이썬 ν”„λ‘œκ·Έλž˜λ¨Έμ•Ό.이 λ©”μ‹œμ§€λŠ” μ‚¬μš©μžκ°€ 보낸것이 μ•„λ‹™λ‹ˆλ‹€."
    @property
    def _llm_type(self) -> str:
        return "custom_chatopenai_github"

    def _post(self, body: Dict[str, Any]) -> Dict[str, Any]:
        token = self.token or os.getenv("GITHUB_TOKEN") or os.getenv("token")
        session = requests.Session()
        retries = Retry(total=self.max_retries, backoff_factor=self.backoff_factor,
                        status_forcelist=[429, 500, 502, 503, 504])
        session.mount("https://", HTTPAdapter(max_retries=retries))
        session.headers.update({
            "Authorization": f"Bearer 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",
            "Content-Type": "application/json",
        })
        resp = session.post(f"{self.endpoint}/chat/completions", json=body, timeout=self.request_timeout)
        resp.raise_for_status()
        return resp.json()

    def _call(self, messages: List[Any], stop: Optional[List[str]] = None, **kwargs):
        """ConversationBufferMemory의 이전 λŒ€ν™”κΉŒμ§€ 포함해 λ©”μ‹œμ§€ 생성"""
        msg_list = []

        # System prompt
        if self.system_prompt:
            msg_list.append({"role": "system", "content": self.system_prompt})

        # Include memory (both human and ai messages)
        for msg in messages:
            if isinstance(msg, HumanMessage):
                msg_list.append({"role": "user", "content": msg.content})
            elif isinstance(msg, AIMessage):
                msg_list.append({"role": "assistant", "content": msg.content})
            elif isinstance(msg, SystemMessage):
                msg_list.append({"role": "system", "content": msg.content})

        body = {"model": self.model_name, "messages": msg_list}
        if stop:
            body["stop"] = stop

        res = self._post(body)
        content = res.get("choices", [{}])[0].get("message", {}).get("content", "")
        return content
    async def _acall(self, messages: List[Any], stop: Optional[List[str]] = None, **kwargs):
        return self._call(messages, stop, **kwargs)
'''

'''
class GitHubModelLLM(LLM):
    """GitHub Models API 기반 LangChain LLM (λŒ€ν™” λ©”λͺ¨λ¦¬ 톡합 지원)"""
    model: str = "openai/gpt-4.1"
    endpoint: str = "https://models.github.ai/inference"
    token: Optional[str] = os.environ.get("token")
    system_prompt: Optional[str] = "λ„ˆλŠ” PIXAL(Primary Interactive X-ternal Assistant with multi Language)이야. λ„ˆμ˜ κ°œλ°œμžλŠ” μ •μ„±μœ€ μ΄λΌλŠ” 6ν•™λ…„ 파이썬 ν”„λ‘œκ·Έλž˜λ¨Έμ•Ό.이것은 μ‹œμŠ€ν…œ λ©”μ‹œμ§€μž…λ‹ˆλ‹€.μ°Έκ³  ν•˜μ‹­μ‹œμ˜€.이 λ©”μ‹œμ§€λŠ” μ‚¬μš©μžκ°€ 보낸것이 μ•„λ‹™λ‹ˆλ‹€."
    request_timeout: float = 30.0
    max_retries: int = 2
    backoff_factor: float = 0.3

    @property
    def _llm_type(self) -> str:
        return "github_models_api"

    def _post_chat(self, body: Dict[str, Any]) -> Dict[str, Any]:
        token = self.token or os.getenv("GITHUB_TOKEN") or os.getenv("token")
        session = requests.Session()
        retries = Retry(total=self.max_retries, backoff_factor=self.backoff_factor,
                        status_forcelist=[429, 500, 502, 503, 504])
        session.mount("https://", HTTPAdapter(max_retries=retries))
        session.headers.update({
            "Content-Type": "application/json",
            "Authorization": f"Bearer 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"
        })
        resp = session.post(f"{self.endpoint}/chat/completions", json=body, timeout=self.request_timeout)
        resp.raise_for_status()
        return resp.json()

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        **kwargs
    ) -> str:
        """λŒ€ν™” λ©”λͺ¨λ¦¬(chat_history)λ₯Ό ν¬ν•¨ν•˜μ—¬ λͺ¨λΈ 호좜"""
        # πŸ’¬ λ©”λͺ¨λ¦¬μ— μ €μž₯된 λŒ€ν™” λ©”μ‹œμ§€ 뢈러였기
        memory = kwargs.get("memory")
        messages = []

        if self.system_prompt:
            messages.append({"role": "system", "content": self.system_prompt})

        # memoryκ°€ μžˆμ„ 경우 (이전 λŒ€ν™” 포함)
        if memory and hasattr(memory, "chat_memory"):
            for msg in memory.chat_memory.messages:
                role = "user" if msg.type == "human" else "assistant"
                messages.append({"role": role, "content": msg.content})

        # ν˜„μž¬ μ‚¬μš©μž μž…λ ₯
        messages.append({"role": "user", "content": prompt})

        body = {"model": self.model, "messages": messages}
        if stop:
            body["stop"] = stop

        # Call the API
        res = self._post_chat(body)
        msg = res.get("choices", [{}])[0].get("message", {})
        return msg.get("content") or json.dumps(msg.get("function_call", {}))
'''
"""
class GitHubModelLLM(LLM):
    model: str = "openai/gpt-4.1"
    endpoint: str = "https://models.github.ai/inference"
    token: Optional[str] = os.environ.get("token")
    system_prompt: Optional[str] = (
        "λ„ˆλŠ” PIXAL(Primary Interactive X-ternal Assistant with multi Language)이야. λ„ˆμ˜ κ°œλ°œμžλŠ” μ •μ„±μœ€ μ΄λΌλŠ” 6ν•™λ…„ 파이썬 ν”„λ‘œκ·Έλž˜λ¨Έμ•Ό.이 λ©”μ‹œμ§€λŠ” μ‚¬μš©μžκ°€ 보낸것이 μ•„λ‹™λ‹ˆλ‹€.")
    request_timeout: float = 30.0
    max_retries: int = 2
    backoff_factor: float = 0.3

    @property
    def _llm_type(self) -> str:
        return "github_models_api"

    def _post_chat(self, body: Dict[str, Any]) -> Dict[str, Any]:
        token = self.token or os.getenv("GITHUB_TOKEN") or os.getenv("token")
        if not token:
            raise ValueError("❌ GitHub token이 μ„€μ •λ˜μ§€ μ•Šμ•˜μŠ΅λ‹ˆλ‹€.")
        session = requests.Session()
        retries = Retry(total=self.max_retries, backoff_factor=self.backoff_factor,
                        status_forcelist=[429, 500, 502, 503, 504])
        session.mount("https://", HTTPAdapter(max_retries=retries))
        session.headers.update({
            "Content-Type": "application/json",
            "Authorization": f"Bearer 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"
        })
        resp = session.post(f"{self.endpoint}/chat/completions", json=body, timeout=self.request_timeout)
        resp.raise_for_status()
        return resp.json()

    def _call(self, prompt: str, stop: Optional[List[str]] = None, **kwargs) -> str:
        memory = kwargs.get("memory")
        messages = []

        # 1️⃣ System prompt
        if self.system_prompt:
            messages.append({"role": "system", "content": self.system_prompt})

        # 2️⃣ Include earlier turns stored in memory
        if memory and hasattr(memory, "chat_memory"):
            for msg in memory.chat_memory.messages:
                if hasattr(msg, "type") and msg.type == "human":
                    messages.append({"role": "user", "content": msg.content})
                elif hasattr(msg, "type") and msg.type == "ai":
                    messages.append({"role": "assistant", "content": msg.content})

        # 3️⃣ Current user input
        messages.append({"role": "user", "content": prompt})

        body = {"model": self.model, "messages": messages}
        if stop:
            body["stop"] = stop

        res = self._post_chat(body)
        msg = res.get("choices", [{}])[0].get("message", {})
        return msg.get("content") or json.dumps(msg.get("function_call", {}))
"""
# ──────────────────────────────
# βœ… LangChain tools & agent setup
# ──────────────────────────────
import requests
from datetime import datetime as dt1, timezone, timedelta
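
# dt1/timezone/timedelta back the time-related answers; a small sketch of
# "now" in Korea Standard Time (assumption: KST, UTC+9, is the intended zone):
#
#   now_kst = dt1.now(timezone(timedelta(hours=9)))
#   now_kst.strftime("%Y-%m-%d %H:%M")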
'''
tools = load_tools(["ddg-search", "arxiv"], llm=llm,allow_dangerous_tools=True)
tools.append(Tool(name="python_repl", func=PythonREPLTool().run, description="Python μ½”λ“œ μ‹€ν–‰ 도ꡬ"))
retriever = WikipediaRetriever(lang="ko")
tools.append(Tool(name="wiki", func=retriever.get_relevant_documents, description="μœ„ν‚€λ°±κ³Ό 검색"))
# βœ… Conversation memory
from langchain_community.tools import ShellTool
#tools+=[YTS()]
shell_tool = ShellTool()
tools.append(shell_tool)
'''
#memory = ConversationBufferMemory(return_messages=True)

# βœ… Agent (wired to memory)

# ──────────────────────────────
# βœ… Chat function (memory preserved)
# ──────────────────────────────
prompt1="""Answer the following questions as best you can. You have access to the following tools:

(The tools you have)

Use the following format:

Question: the input question you must answer
Thought: you should always think about what to do
Action: the action to take, should be one of [{tool_names}]
Action Input: the input to the action
Observation: the result of the action
... (this Thought/Action/Action Input/Observation can repeat N times)
Thought: I now know the final answer
Final Answer: the final answer to the original input question

Begin!
"""
def chat(message, history):
    llm = ChatOpenAI(
        model="openai/gpt-4.1-mini",
        # Token read from the environment (assumption: GITHUB_TOKEN holds a GitHub Models PAT)
        openai_api_key=os.environ.get("GITHUB_TOKEN", os.environ.get("token", "")),
        openai_api_base="https://models.github.ai/inference",  # πŸ‘ˆ acts as the base_url
    )
    ab=[{"role":"system","content":f"λ„ˆλŠ” PIXAL(Primary Interactive X-ternal Assistant with multi Language)이야. λ„ˆμ˜ κ°œλ°œμžλŠ” μ •μ„±μœ€ μ΄λΌλŠ” 6ν•™λ…„ 파이썬 ν”„λ‘œκ·Έλž˜λ¨Έμ•Ό.이 λ©”μ‹œμ§€λŠ” μ‚¬μš©μžκ°€ 보낸것이 μ•„λ‹™λ‹ˆλ‹€.ν”„λ‘¬ν”„νŠΈ μ„€λͺ…에 무쑰건 λ”°λ₯΄μ‹œμ˜€.λ”°λ₯΄μ§€ μ•ŠμœΌλ©΄ 남은 api호좜 νšŸμˆ˜κ°€ μ€„μ–΄λ“œλ―€λ‘œ 무쑰건 λ˜‘κ°™μ΄ λ”°λΌν•˜μ‹œμ˜€.μ‹œκ°„μ„ λ¬Όμ„λ•ŒλŠ” duckduckgoλ₯Ό μ‚¬μš©ν•˜μ‹œμ˜€.λ‹€μŒμ˜ ν”„λ‘¬ν”„νŠΈλ₯Ό 보고 λŒ€λ‹΅ν˜•μ‹μ„ μ§€μ‹œλŒ€λ‘œ μ •ν•˜μ‹œμ˜€: {prompt1} 그리고 덕덕고 μ„œμΉ˜λŠ” 검색어 1개만 μž…λ ₯ν•˜μ‹œμ˜€.λ¬Έμž₯ κΈˆμ§€."}]
    for user_msg, ai_msg in history:
        ab.append({"role": "user", "content": user_msg})
        ab.append({"role": "assistant", "content": ai_msg})
    ab.append({"role":"user","content":message})
    '''
    
    ab=[SystemMessage(content="λ„ˆλŠ” PIXAL(Primary Interactive X-ternal Assistant with multi Language)이야. λ„ˆμ˜ κ°œλ°œμžλŠ” μ •μ„±μœ€ μ΄λΌλŠ” 6ν•™λ…„ 파이썬 ν”„λ‘œκ·Έλž˜λ¨Έμ•Ό.이 λ©”μ‹œμ§€λŠ” μ‚¬μš©μžκ°€ 보낸것이 μ•„λ‹™λ‹ˆλ‹€.ν”„λ‘¬ν”„νŠΈ μ„€λͺ…에 무쑰건 λ”°λ₯΄μ‹œμ˜€.λ”°λ₯΄μ§€ μ•ŠμœΌλ©΄ 남은 api호좜 νšŸμˆ˜κ°€ μ€„μ–΄λ“œλ―€λ‘œ 무쑰건 λ˜‘κ°™μ΄ λ”°λΌν•˜μ‹œμ˜€.μ‹œκ°„μ„ λ¬Όμ„λ•ŒλŠ” duckduckgoλ₯Ό μ‚¬μš©ν•˜μ‹œμ˜€.")]
    for I in history:
        ab.append(HumanMessage(content=I[0]))
        ab.append(AIMessage(content=I[1]))
    ab.append(HumanMessage(content=message))
    '''
    tools = load_tools(["ddg-search", "arxiv"], llm=llm,allow_dangerous_tools=True)
    tools.append(Tool(name="python_repl", func=PythonREPLTool().run, description="Python μ½”λ“œ μ‹€ν–‰ 도ꡬ"))
    retriever = WikipediaRetriever(lang="ko")
    tools.append(Tool(name="wiki", func=retriever.get_relevant_documents, description="μœ„ν‚€λ°±κ³Ό 검색"))
    # βœ… Conversation memory
    from langchain_community.tools import ShellTool
    #tools+=[YTS()]
    shell_tool = ShellTool()
    tools.append(shell_tool)
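    # Each tool can also be exercised directly, e.g. (hypothetical inputs):
    #   tools[0].run("LangChain agents")      # DuckDuckGo search
    #   PythonREPLTool().run("print(1 + 1)")  # -> "2\n"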
    agent = initialize_agent(
        tools,
        llm,
        agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION,
        #memory=memory,
        verbose=True,
        handle_parsing_errors=True,
        max_iterations=20,
    )
    try:
        raw = agent.invoke(ab)["output"]
    except Exception:
        try:
            # Fallback key on rate limit/error (assumption: supplied via GITHUB_TOKEN_FALLBACK1)
            llm = ChatOpenAI(model="openai/gpt-4.1-mini", openai_api_key=os.environ.get("GITHUB_TOKEN_FALLBACK1", ""), openai_api_base="https://models.github.ai/inference")
            agent = initialize_agent(tools, llm, agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION, verbose=True, handle_parsing_errors=True, max_iterations=20)
            raw = agent.invoke(ab)["output"]
        except Exception:
            # Last-resort key (assumption: supplied via GITHUB_TOKEN_FALLBACK2)
            llm = ChatOpenAI(model="openai/gpt-4.1-mini", openai_api_key=os.environ.get("GITHUB_TOKEN_FALLBACK2", ""), openai_api_base="https://models.github.ai/inference")
            agent = initialize_agent(tools, llm, agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION, verbose=True, handle_parsing_errors=True, max_iterations=20)
            raw = agent.invoke(ab)["output"]
    try:
        # Reflect the exchange in LangChain memory
        # If the output came back as JSON, parse out the answer
        text = str(raw)
        match = re.search(r"\{.*\}", text, re.DOTALL)
        if match:
            try:
                obj = json.loads(match.group(0))
                text = obj.get("action_input") or obj.get("Final Answer") or obj.get("content") or text
            except Exception:
                pass
        # Append the AI response to memory
    except Exception as e:
        text = str(raw)
    

    history = history + [(message, text)]
    return history, history, ""

# ──────────────────────────────
# βœ… Gradio UI (ChatGPT style)
# ──────────────────────────────
with gr.Blocks(theme=gr.themes.Soft(), title="PIXAL Assistant") as demo:
    gr.HTML("""
    <div style="background:#f1f5f9;padding:12px;border-bottom:1px solid #d1d5db;
                display:flex;align-items:center;justify-content:space-between;">
        <h2 style="margin:0;">πŸ€– PIXAL Assistant</h2>
        <span style="font-size:0.9em;color:#555;">LangChain + GitHub LLM</span>
    </div>
    """)

    chatbot = gr.Chatbot(
        label=None,
        height=720,
        bubble_full_width=False,
        render_markdown=True,
        avatar_images=("https://avatars.githubusercontent.com/u/9919?s=280&v=4", None),
    )

    with gr.Row():
        msg = gr.Textbox(placeholder="λ©”μ‹œμ§€λ₯Ό μž…λ ₯ν•˜μ„Έμš”...", show_label=False, scale=8)
        send = gr.Button("전솑", variant="primary", scale=1)
        clear = gr.Button("🧹 μ΄ˆκΈ°ν™”", scale=1)

    msg.submit(chat, [msg, chatbot], [chatbot, chatbot, msg])
    send.click(chat, [msg, chatbot], [chatbot, chatbot, msg])
    clear.click(lambda: None, None, chatbot, queue=False)

    gr.Markdown("""
    <div style="text-align:center;color:#777;font-size:0.85em;margin-top:8px;">
    πŸ’‘ λŒ€ν™” 기둝은 μ„Έμ…˜ λ™μ•ˆ μœ μ§€λ©λ‹ˆλ‹€.  
    Made with ❀️ by PIXAL
    </div>
    """)

if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860)