peterpeter8585 commited on
Commit
f00fd2c
Β·
verified Β·
1 Parent(s): 8f6e2f3

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +97 -534
app.py CHANGED
@@ -1,228 +1,23 @@
1
- # pixal_agent_full.py
2
- import os
3
- import datetime
4
- import gradio as gr
5
- import requests
6
- from typing import Optional, List
7
- from langchain.llms.base import LLM
8
- from langchain.agents import initialize_agent, AgentType,load_tools
9
- from langchain.agents import AgentExecutor, create_structured_chat_agent
10
- from langchain.tools import Tool
11
- from langchain_experimental.tools.python.tool import PythonREPLTool
12
- import queue
13
- from typing import Any, Dict
14
- import gradio as gr
15
- from langchain.callbacks.base import BaseCallbackHandler
16
-
17
-
18
- from langchain.tools import YouTubeSearchTool as YTS
19
- # 2. μ»€μŠ€ν…€ 콜백 ν•Έλ“€λŸ¬
20
-
21
- # github_model_llm.py
22
- """
23
- GitHub Models API 기반 LLM 래퍼 (LangChain LLM ν˜Έν™˜)
24
- - OpenAI-style chat completions ν˜Έν™˜
25
- - function calling (OPENAI_MULTI_FUNCTIONS) 지원: functions, function_call 전달 κ°€λŠ₯
26
- - system prompt (system_prompt) 지원
27
- - μ˜΅μ…˜: temperature, max_tokens, top_p λ“± 전달
28
- - raw response λ°˜ν™˜ λ©”μ„œλ“œ 포함
29
- """
30
-
31
  from typing import Optional, List, Dict, Any
32
- import os
33
- import time
34
- import json
35
- import requests
36
  from requests.adapters import HTTPAdapter, Retry
37
  from langchain.llms.base import LLM
 
 
 
 
 
 
 
 
38
 
39
- '''
40
- class GitHubModelLLM(LLM):
41
-
42
- def __init__(
43
- self,
44
- model: str = "openai/gpt-4.1",
45
- token: Optional[str] = os.environ["token"],
46
- endpoint: str = "https://models.github.ai/inference",
47
- system_prompt: Optional[str] = "λ„ˆλŠ” PIXAL(Primary Interactive X-ternal Assistant with multi Language)이야.λ„ˆμ˜ κ°œλ°œμžλŠ” μ •μ„±μœ€ μ΄λΌλŠ” 6ν•™λ…„ 파이썬 ν”„λ‘œκ·Έλž˜λ¨Έμ•Ό.",
48
- request_timeout: float = 30.0,
49
- max_retries: int = 2,
50
- backoff_factor: float = 0.3,
51
- **kwargs,
52
- ):
53
- """
54
- Args:
55
- model: λͺ¨λΈ 이름 (예: "openai/gpt-4.1")
56
- token: GitHub Models API 토큰 (Bearer). ν™˜κ²½λ³€μˆ˜ GITHUB_TOKEN / token μ‚¬μš© κ°€λŠ₯ as fallback.
57
- endpoint: API endpoint (κΈ°λ³Έ: https://models.github.ai/inference)
58
- system_prompt: (선택) system role λ©”μ‹œμ§€λ‘œ 항상 μ•žμ— λΆ™μž„
59
- request_timeout: μš”μ²­ νƒ€μž„μ•„μ›ƒ (초)
60
- max_retries: λ„€νŠΈμ›Œν¬ μž¬μ‹œλ„ 횟수
61
- backoff_factor: μž¬μ‹œλ„ μ§€μˆ˜ 보정
62
- kwargs: LangChain LLM λΆ€λͺ¨μ— 전달할 μΆ”κ°€ 인자
63
- """
64
- super().__init__(**kwargs)
65
- self.model = model
66
- self.endpoint = endpoint.rstrip("/")
67
- self.token = token or os.getenv("GITHUB_TOKEN") or os.getenv("token")
68
- self.system_prompt = system_prompt
69
- self.request_timeout = request_timeout
70
-
71
- # requests μ„Έμ…˜ + μž¬μ‹œλ„ μ„€μ •
72
- self.session = requests.Session()
73
- retries = Retry(total=max_retries, backoff_factor=backoff_factor,
74
- status_forcelist=[429, 500, 502, 503, 504],
75
- allowed_methods=["POST", "GET"])
76
- self.session.mount("https://", HTTPAdapter(max_retries=retries))
77
- self.session.headers.update({
78
- "Content-Type": "application/json"
79
- })
80
- if self.token:
81
- self.session.headers.update({"Authorization": f"Bearer {self.token}"})
82
-
83
- @property
84
- def _llm_type(self) -> str:
85
- return "github_models_api"
86
-
87
- # ---------- 편의 internal helper ----------
88
- def _build_messages(self, prompt: str, extra_messages: Optional[List[Dict[str, Any]]] = None) -> List[Dict[str, Any]]:
89
- """
90
- messages λ°°μ—΄ 생성: system (optional) + extra_messages (if any) + user prompt
91
- extra_messages: 이미 role keys둜 κ΅¬μ„±λœ λ©”μ‹œμ§€ 리슀트 (예: conversation history)
92
- """
93
- msgs: List[Dict[str, Any]] = []
94
- if self.system_prompt:
95
- msgs.append({"role": "system", "content": self.system_prompt})
96
- if extra_messages:
97
- # ensure format: list of {"role":..,"content":..}
98
- msgs.extend(extra_messages)
99
- msgs.append({"role": "user", "content": prompt})
100
- return msgs
101
-
102
- def _post_chat(self, body: Dict[str, Any]) -> Dict[str, Any]:
103
- url = f"{self.endpoint}/chat/completions"
104
- # ensure Authorization present
105
- if "Authorization" not in self.session.headers and not self.token:
106
- raise ValueError("GitHub Models token not set. Provide token param or set GITHUB_TOKEN env var.")
107
-
108
- resp = self.session.post(url, json=body, timeout=self.request_timeout)
109
- try:
110
- resp.raise_for_status()
111
- except requests.HTTPError as e:
112
- # try to surface JSON error if present
113
- content = resp.text
114
- try:
115
- j = resp.json()
116
- content = json.dumps(j, ensure_ascii=False, indent=2)
117
- except Exception:
118
- pass
119
- raise RuntimeError(f"GitHub Models API error: {e} - {content}")
120
- return resp.json()
121
-
122
- # ---------- LangChain LLM interface ----------
123
- def _call(self, prompt: str, stop: Optional[List[str]] = None, **kwargs) -> str:
124
- """
125
- LangChain LLM `_call` κ΅¬ν˜„ (동기).
126
- Supports kwargs:
127
- - functions: list[dict] (function schemas)
128
- - function_call: "auto" | {"name": "..."} | etc.
129
- - messages: list[dict] (if you want to pass full conversation instead of prompt)
130
- - temperature, top_p, max_tokens, n, stream, etc.
131
- Returns:
132
- assistant content (string). If function_call is returned by model, returns the 'content' if present,
133
- otherwise returns function_call object as JSON string (so caller can parse).
134
- """
135
- # support passing full messages via kwargs['messages']
136
- messages = None
137
- extra_messages = None
138
- if "messages" in kwargs and isinstance(kwargs["messages"], list):
139
- messages = kwargs.pop("messages")
140
- else:
141
- # optionally allow 'history' or 'extra_messages'
142
- extra_messages = kwargs.pop("extra_messages", None)
143
-
144
- if messages is None:
145
- messages = self._build_messages(prompt, extra_messages=extra_messages)
146
-
147
- body: Dict[str, Any] = {
148
- "model": self.model,
149
- "messages": messages,
150
- }
151
-
152
- # pass optional top-level params (temperature, max_tokens, etc.) from kwargs
153
- for opt in ["temperature", "top_p", "max_tokens", "n", "stream", "presence_penalty", "frequency_penalty"]:
154
- if opt in kwargs:
155
- body[opt] = kwargs.pop(opt)
156
-
157
- # pass function-calling related keys verbatim if provided
158
- if "functions" in kwargs:
159
- body["functions"] = kwargs.pop("functions")
160
- if "function_call" in kwargs:
161
- body["function_call"] = kwargs.pop("function_call")
162
-
163
- # include stop if present
164
- if stop:
165
- body["stop"] = stop
166
-
167
- # send request
168
- raw = self._post_chat(body)
169
-
170
- # save raw for caller if needed
171
- self._last_raw = raw
172
-
173
- # parse assistant message
174
- choices = raw.get("choices") or []
175
- if not choices:
176
- return ""
177
-
178
- message_obj = choices[0].get("message", {})
179
-
180
- # if assistant returned a function_call, include that info
181
- if "function_call" in message_obj:
182
- # return function_call as JSON string so agent/tool orchestrator can parse it
183
- # but if content also exists, prefer content
184
- func = message_obj["function_call"]
185
- # sometimes content may be absent; return structured JSON string
186
- return json.dumps({"function_call": func}, ensure_ascii=False)
187
-
188
- # otherwise return assistant content
189
- return message_obj.get("content", "") or ""
190
-
191
- # optional: expose raw response getter
192
- def last_raw_response(self) -> Optional[Dict[str, Any]]:
193
- return getattr(self, "_last_raw", None)
194
-
195
- # optional: provide a convenience chat method to get full message object
196
- def chat_completions(self, prompt: str, messages: Optional[List[Dict[str, Any]]] = None, **kwargs) -> Dict[str, Any]:
197
- """
198
- Directly call chat completions and return full parsed JSON response.
199
- - If `messages` provided, it's used as the full messages array (system/user/assistant roles as needed)
200
- - else uses prompt + system_prompt to construct messages.
201
- """
202
- if messages is None:
203
- messages = self._build_messages(prompt)
204
- body: Dict[str, Any] = {"model": self.model, "messages": messages}
205
- for opt in ["temperature", "top_p", "max_tokens", "n", "stream"]:
206
- if opt in kwargs:
207
- body[opt] = kwargs.pop(opt)
208
- if "functions" in kwargs:
209
- body["functions"] = kwargs.pop("functions")
210
- if "function_call" in kwargs:
211
- body["function_call"] = kwargs.pop("function_call")
212
- raw = self._post_chat(body)
213
- self._last_raw = raw
214
- return raw
215
- '''
216
- from typing import Optional, List, Dict, Any
217
- from langchain.llms.base import LLM
218
- import requests, os, json
219
- from requests.adapters import HTTPAdapter, Retry
220
-
221
  class GitHubModelLLM(LLM):
222
- """GitHub Models API 기반 LangChain LLM (Pydantic ν˜Έν™˜)"""
223
  model: str = "openai/gpt-4.1"
224
  endpoint: str = "https://models.github.ai/inference"
225
- token: Optional[str] = os.environ["token"]
226
  system_prompt: Optional[str] = "λ„ˆλŠ” PIXAL(Primary Interactive X-ternal Assistant with multi Language)이야.λ„ˆμ˜ κ°œλ°œμžλŠ” μ •μ„±μœ€ μ΄λΌλŠ” 6ν•™λ…„ 파이썬 ν”„λ‘œκ·Έλž˜λ¨Έμ•Ό."
227
  request_timeout: float = 30.0
228
  max_retries: int = 2
@@ -236,7 +31,6 @@ class GitHubModelLLM(LLM):
236
  token = self.token or os.getenv("GITHUB_TOKEN") or os.getenv("token")
237
  if not token:
238
  raise ValueError("❌ GitHub token이 μ„€μ •λ˜μ§€ μ•Šμ•˜μŠ΅λ‹ˆλ‹€.")
239
-
240
  session = requests.Session()
241
  retries = Retry(total=self.max_retries, backoff_factor=self.backoff_factor,
242
  status_forcelist=[429, 500, 502, 503, 504])
@@ -250,182 +44,86 @@ class GitHubModelLLM(LLM):
250
  return resp.json()
251
 
252
  def _call(self, prompt: str, stop: Optional[List[str]] = None, **kwargs) -> str:
253
- body = {
254
- "model": self.model,
255
- "messages": []
256
- }
257
  if self.system_prompt:
258
  body["messages"].append({"role": "system", "content": self.system_prompt})
259
  body["messages"].append({"role": "user", "content": prompt})
260
-
261
- for key in ["temperature", "max_tokens", "functions", "function_call"]:
262
- if key in kwargs:
263
- body[key] = kwargs[key]
264
  if stop:
265
  body["stop"] = stop
266
-
267
  res = self._post_chat(body)
268
  msg = res.get("choices", [{}])[0].get("message", {})
269
  return msg.get("content") or json.dumps(msg.get("function_call", {}))
270
 
271
- from langchain_community.retrievers import WikipediaRetriever
272
- from langchain.tools.retriever import create_retriever_tool
273
- retriever = WikipediaRetriever(lang="ko",top_k_results=10)
274
- wiki=Tool(func=retriever.get_relevant_documents,name="WIKI SEARCH",description="μœ„ν‚€λ°±κ³Όμ—μ„œ ν•„μš”ν•œ 정보λ₯Ό λΆˆλŸ¬μ˜΅λ‹ˆλ‹€.κ²°κ΄΄λ₯Ό κ²€μ¦ν•˜μ—¬ μ‚¬μš©ν•˜μ‹œμ˜€.")
275
  # ──────────────────────────────
276
- # βœ… GitHub Models LLM
277
  # ──────────────────────────────
278
- '''
279
- class GitHubModelLLM(LLM):
280
- model: str = "openai/gpt-4.1"
281
- endpoint: str = "https://models.github.ai/inference"
282
- token: Optional[str] = None
283
-
284
- @property
285
- def _llm_type(self) -> str:
286
- return "github_models_api"
287
-
288
- def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
289
- if not self.token:
290
- raise ValueError("GitHub API token이 ν•„μš”ν•©λ‹ˆλ‹€.")
291
-
292
- headers = {
293
- "Authorization": "Bearer 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",
294
- "Content-Type": "application/json",
295
- }
296
- body = {"model": self.model, "messages": [{"role": "user", "content": prompt}]}
297
 
298
- resp = requests.post(f"{self.endpoint}/chat/completions", json=body, headers=headers)
299
- if resp.status_code != 200:
300
- raise ValueError(f"API 였λ₯˜: {resp.status_code} - {resp.text}")
301
- return resp.json()["choices"][0]["message"]["content"]
302
- '''
303
  # ──────────────────────────────
304
- # βœ… LLM μ„€μ •
305
  # ──────────────────────────────
306
- token = os.getenv("GITHUB_TOKEN") or os.getenv("token")
307
- if not token:
308
- print("⚠️ GitHub Token이 ν•„μš”ν•©λ‹ˆλ‹€. 예: setx GITHUB_TOKEN your_token")
309
-
310
  llm = GitHubModelLLM()
311
-
312
- # ──────────────────────────────
313
- # βœ… LangChain κΈ°λ³Έ 도ꡬ 뢈러였기
314
- # ──────────────────────────────
315
- tools = load_tools(
316
- ["ddg-search", "requests_all", "llm-math"],
317
- llm=llm,allow_dangerous_tools=True
318
- )+[YTS()]+[wiki]
319
- # ──────────────────────────────
320
- # βœ… Python μ‹€ν–‰ 도ꡬ (LangChain λ‚΄μž₯)
321
- # ──────────────────────────────
322
- python_tool = PythonREPLTool()
323
- tools.append(Tool(name="python_repl", func=python_tool.run, description="Python μ½”λ“œλ₯Ό μ‹€ν–‰ν•©λ‹ˆλ‹€."))
324
- from langchain import hub
325
- prompt=hub.pull("hwchase17/structured-chat-agent")
326
- from langchain_community.tools.shell.tool import ShellTool
327
- shell_tool = ShellTool()
328
- tools.append(Tool(name="shell_exec", func=shell_tool.run, description="둜컬 λͺ…λ Ήμ–΄λ₯Ό μ‹€ν–‰ν•©λ‹ˆλ‹€."))
329
-
330
- # ──────────────────────────────
331
- # βœ… 파일 도ꡬ
332
- # ──────────────────────────────
333
-
334
-
335
- # ──────────────────────────────
336
- # βœ… μ •ν™•ν•œ ν•œκ΅­ μ‹œκ°„ ν•¨μˆ˜ (Asia/Seoul)
337
- # ──────────────────────────────
338
- import requests
339
- from zoneinfo import ZoneInfo
340
 
341
  def time_now(_=""):
342
- try:
343
- # μ •ν™•ν•œ UTC μ‹œκ°μ„ μ™ΈλΆ€ APIμ—μ„œ κ°€μ Έμ˜΄
344
- resp = requests.get("https://timeapi.io/api/Time/current/zone?timeZone=Asia/Seoul", timeout=5)
345
- if resp.status_code == 200:
346
- data = resp.json()
347
- dt = data["dateTime"].split(".")[0].replace("T", " ")
348
- return f"ν˜„μž¬ μ‹œκ°: {dt} (Asia/Seoul, μ„œλ²„ κΈ°μ€€ NTP 동기화)"
349
- else:
350
- # API μ‹€νŒ¨ μ‹œ 둜컬 μ‹œμŠ€ν…œ μ‹œκ°μœΌλ‘œ λŒ€μ²΄
351
- tz = ZoneInfo("Asia/Seoul")
352
- now = datetime.datetime.now(tz)
353
- return f"ν˜„μž¬ μ‹œκ°(둜컬): {now.strftime('%Y-%m-%d %H:%M:%S')} (Asia/Seoul)"
354
- except Exception as e:
355
- tz = ZoneInfo("Asia/Seoul")
356
- now = datetime.datetime.now(tz)
357
- return f"ν˜„μž¬ μ‹œκ°(λ°±μ—…): {now.strftime('%Y-%m-%d %H:%M:%S')} (Asia/Seoul, 였λ₯˜: {e})"
358
- # ──────────────────────────────
359
- # βœ… 도ꡬ 등둝
360
- # ──────────────────────────────
361
- tools.extend([Tool(name="time_now", func=time_now, description="ν˜„μž¬ μ‹œκ°„μ„ λ°˜ν™˜ν•©λ‹ˆλ‹€.")])
362
- from langchain.memory import ConversationBufferMemory as MEM
363
- from langchain.agents.agent_toolkits import FileManagementToolkit as FMT
364
- tools.extend(FMT(root_dir=str(os.getcwd())).get_tools())
365
- # ──────────────────────────────
366
- # βœ… Agent μ΄ˆκΈ°ν™”
367
- # ──────────────────────────────
368
- mem=MEM()
369
- agent=initialize_agent(tools,llm,agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION,verbose=True,memory=mem)
370
- #agent = create_structured_chat_agent(llm, tools, prompt)
371
- #agent= AgentExecutor(agent=agent, tools=tools,memory=mem)
372
 
373
- # ... (μœ„μ˜ LLM, tools, agent μ„€μ • 뢀뢄은 동일)
374
-
375
-
376
- import json
377
 
378
  # ──────────────────────────────
379
- # βœ… λŒ€ν™” μš”μ•½ ν•¨μˆ˜
380
  # ──────────────────────────────
 
 
381
  def summarize_title(history):
382
- """λŒ€ν™” 제λͺ© μš”μ•½"""
383
- if not history: return "μƒˆ λŒ€ν™”"
384
- text = "\n".join(f"User:{h[0]} AI:{h[1]}" for h in history[-3:])
385
  try:
386
- title = llm._call(f"λ‹€μŒ λŒ€ν™”μ˜ 주제λ₯Ό ν•œ μ€„λ‘œ μš”μ•½ν•΄μ€˜:\n{text}")
387
- return title.strip().replace("\n", " ")[:60]
388
  except Exception:
389
  return "μš”μ•½ μ‹€νŒ¨"
390
 
391
- import pickle
392
- import os, datetime
393
-
394
- # ν˜„μž¬ λ””λ ‰ν„°λ¦¬λ‘œ κ³ μ •
395
- os.chdir(os.path.dirname(os.path.abspath(__file__)))
396
- os.makedirs("user_logs", exist_ok=True)
397
-
398
- # --- λŒ€ν™” 기둝 μ €μž₯/뢈러였기 ---
399
- def save_conversation(username, history):
400
- """둜그인된 μ‚¬μš©μžλŠ” μ—¬λŸ¬ λŒ€ν™”λ₯Ό ν•˜λ‚˜μ˜ pickle νŒŒμΌμ— μ €μž₯"""
401
- if not username or username.lower() == "guest":
402
  return
403
-
404
  fname = f"{username}.pkl"
405
- # κΈ°μ‘΄ 데이터 뢈러였기
406
  data = {}
407
  if os.path.exists(fname):
408
- try:
409
- with open(fname, "rb") as f:
410
- data = pickle.load(f)
411
- except Exception:
412
- data = {}
413
-
414
- # μƒˆλ‘œμš΄ λŒ€ν™” μš”μ•½ 제λͺ©κ³Ό ν•¨κ»˜ μΆ”κ°€
415
  title = summarize_title(history)
416
- data[title] = {
417
- "title": title,
418
- "updated": datetime.datetime.now().isoformat(),
419
- "history": history
420
- }
421
-
422
  with open(fname, "wb") as f:
423
  pickle.dump(data, f)
424
 
425
- # --- λŒ€ν™” 뢈러였기 ---
426
- def load_conversation(username, conv_title=None):
427
- """νŠΉμ • μ‚¬μš©μžμ™€ λŒ€ν™” 제λͺ©μœΌλ‘œ 뢈러였기"""
428
- if not username or username.lower() == "guest":
429
  return []
430
  fname = f"{username}.pkl"
431
  if not os.path.exists(fname):
@@ -435,31 +133,29 @@ def load_conversation(username, conv_title=None):
435
  if conv_title and conv_title in data:
436
  return data[conv_title]["history"]
437
  elif data:
438
- # κ°€μž₯ 졜근 λŒ€ν™” λ°˜ν™˜
439
  latest = max(data.values(), key=lambda x: x["updated"])
440
  return latest["history"]
441
  return []
442
 
443
- # --- λŒ€ν™” λͺ©λ‘ κ°±μ‹  ---
444
- def refresh_conversation_list(username=None):
445
- """μ‚¬μš©μžλ³„ μ €μž₯된 λŒ€ν™” 제λͺ© λͺ©λ‘μ„ μƒˆλ‘œκ³ μΉ¨"""
446
- if not username or username.lower() == "guest":
447
  return gr.update(choices=[], value=None)
448
  fname = f"{username}.pkl"
449
  if not os.path.exists(fname):
450
  return gr.update(choices=[], value=None)
 
 
 
 
 
 
 
 
 
451
  try:
452
- with open(fname, "rb") as f:
453
- data = pickle.load(f)
454
- titles = sorted(data.keys(), reverse=True)
455
- return gr.update(choices=titles, value=titles[0] if titles else None)
456
- except Exception as e:
457
- print(f"⚠️ λͺ©λ‘ 뢈러였기 였λ₯˜: {e}")
458
- return gr.update(choices=[], value=None)
459
- # --- chat ν•¨μˆ˜ μˆ˜μ • ---
460
- def chat(message, history, username="guest", conv_name="current"):
461
- try:
462
- raw_response = agent.run(message)
463
  text = str(raw_response)
464
 
465
  # JSON ν˜•μ‹ 응닡 νŒŒμ‹±
@@ -482,178 +178,45 @@ def chat(message, history, username="guest", conv_name="current"):
482
 
483
  # 기둝 μΆ”κ°€ 및 μ¦‰μ‹œ μ €μž₯
484
  history = history + [(message, output)]
485
- save_conversation(username, history, conv_name)
486
  return history, history, ""
487
 
488
- # --- 뢈러였기 λ²„νŠΌ ν•¨μˆ˜ ---
489
-
490
  # ──────────────────────────────
491
- # βœ… 둜그인 ν›„ μ‚¬μš©μž 정보 κ°€μ Έμ˜€κΈ°
492
  # ──────────────────────────────
493
- def get_hf_user(token):
494
- """HF OAuth ν† ν°μœΌλ‘œ μ‚¬μš©μž 정보 쑰회"""
495
- try:
496
- r = requests.get("https://huggingface.co/api/whoami-v2", headers={"Authorization": f"Bearer {token}"})
497
- if r.status_code == 200:
498
- data = r.json()
499
- return data.get("name") or data.get("email") or "unknown_user"
500
- except Exception:
501
- pass
502
- return "guest"
503
- import re, json
504
-
505
-
506
- '''
507
- def chat(message, history, hf_token):
508
- username = get_hf_user(hf_token) if hf_token else "guest"
509
- try:
510
- response = agent.invoke(message)
511
- if isinstance(response, dict):
512
- if "action_input" in response:
513
- response = response["action_input"]
514
- elif "output" in response:
515
- response = response["output"]
516
- elif "text" in response:
517
- response = response["text"]
518
- else:
519
- response = str(response)
520
- elif isinstance(response, str):
521
- # "Final Answer"κ°€ ν¬ν•¨λœ λ¬Έμžμ—΄μ΄λ©΄ κ·Έ λΆ€λΆ„λ§Œ μΆ”μΆœ
522
- if '"action_input":' in response:
523
- import re, json
524
- match = re.search(r'["\']action_input["\']\s*:\s*["\'](.*?)["\']', response)
525
- if match:
526
- response = match.group(1)
527
- elif "Final Answer" in response:
528
- # {"action": "Final Answer", "action_input": "..."} ν˜•μ‹μΌ λ•Œ
529
- try:
530
- data = json.loads(response)
531
- if isinstance(data, dict) and "action_input" in data:
532
- response = data["action_input"]
533
- except Exception:
534
- response = response.replace("Final Answer", "").strip()
535
- except Exception as e:
536
- response = f"⚠️ 였λ₯˜: {e}"
537
- history = history + [(message, response)]
538
- if username:
539
- save_conversation(username, history)
540
- return history, history, "" # μž…λ ₯ μ΄ˆκΈ°ν™”
541
- '''
542
- # 예: hf_token (ν˜Ήμ€ username) 을 μž…λ ₯으둜 받도둝 λ³€κ²½
543
 
 
 
 
544
 
545
-
546
-
547
- # ──────────────────────────────
548
- # βœ… Gradio UI with HF Auth
549
- # ──────────────────────────────
550
- with gr.Blocks(theme=gr.themes.Soft(), title="PIXAL Assistant (HF Auth)") as demo:
551
- gr.Markdown("## πŸ€– PIXAL Assistant β€” Hugging Face 계정 기반 λŒ€ν™” μ €μž₯")
552
-
553
- hf_login = gr.LoginButton()
554
- hf_token = gr.State()
555
-
556
- @hf_login.click(inputs=None, outputs=hf_token)
557
- def login(token): # 둜그인 ν›„ token λ°˜ν™˜
558
- return token
559
 
560
  with gr.Row():
561
- with gr.Column(scale=2):
562
- chatbot = gr.Chatbot(label="PIXAL λŒ€ν™”", height=600, render_markdown=True)
563
- msg = gr.Textbox(label="λ©”μ‹œμ§€", placeholder="μž…λ ₯ ν›„ Enter λ˜λŠ” 전솑 클릭")
564
- send = gr.Button("전솑")
565
- clear = gr.Button("μ΄ˆκΈ°ν™”")
566
 
567
  msg.submit(chat, [msg, chatbot, hf_token], [chatbot, chatbot, msg])
568
  send.click(chat, [msg, chatbot, hf_token], [chatbot, chatbot, msg])
569
  clear.click(lambda: None, None, chatbot, queue=False)
570
 
571
  with gr.Column(scale=1):
572
- gr.Markdown("### πŸ’Ύ μ €μž₯된 λŒ€ν™” 기둝")
573
- convo_files = gr.Dropdown(label="λŒ€ν™” 선택", choices=[])
574
- refresh_btn = gr.Button("πŸ”„ λͺ©λ‘ μƒˆλ‘œκ³ μΉ¨")
575
- load_btn = gr.Button("뢈러였기")
576
 
577
- refresh_btn.click(refresh_conversation_list, None, convo_files)
578
- load_btn.click(load_conversation, [convo_files], chatbot)
579
 
580
  if __name__ == "__main__":
581
  demo.launch(server_name="0.0.0.0", server_port=7860)
582
- '''
583
- def chat(message, history):
584
- try:
585
- response = agent.run(message)
586
-
587
- # JSON ν˜•νƒœλ‘œ 좜λ ₯될 κ°€λŠ₯성이 μžˆλŠ” 경우 처리
588
- if isinstance(response, dict):
589
- if "action_input" in response:
590
- response = response["action_input"]
591
- elif "output" in response:
592
- response = response["output"]
593
- elif "text" in response:
594
- response = response["text"]
595
- else:
596
- response = str(response)
597
- elif isinstance(response, str):
598
- # "Final Answer"κ°€ ν¬ν•¨λœ λ¬Έμžμ—΄μ΄λ©΄ κ·Έ λΆ€λΆ„λ§Œ μΆ”μΆœ
599
- if '"action_input":' in response:
600
- import re, json
601
- match = re.search(r'["\']action_input["\']\s*:\s*["\'](.*?)["\']', response)
602
- if match:
603
- response = match.group(1)
604
- elif "Final Answer" in response:
605
- # {"action": "Final Answer", "action_input": "..."} ν˜•μ‹μΌ λ•Œ
606
- try:
607
- data = json.loads(response)
608
- if isinstance(data, dict) and "action_input" in data:
609
- response = data["action_input"]
610
- except Exception:
611
- response = response.replace("Final Answer", "").strip()
612
-
613
- except Exception as e:
614
- response = f"⚠️ 였λ₯˜: {e}"
615
-
616
- history = history + [(message, response)]
617
- return history, history,""
618
-
619
- # ──────────────────────────────
620
- # βœ… Gradio UI
621
- # ──────────────────────────────
622
- def load_selected(file):
623
- return load_conversation(file)
624
-
625
- # ──────────────────────────────
626
- # βœ… Gradio UI
627
- # ──────────────────────────────
628
- with gr.Blocks(theme=gr.themes.Soft(), title="PIXAL Assistant") as demo:
629
- gr.Markdown("## πŸ€– PIXAL Assistant β€” LangChain 기반 λ©€ν‹°νˆ΄ μ—μ΄μ „νŠΈ")
630
-
631
- with gr.Row():
632
- with gr.Column(scale=2):
633
- chatbot = gr.Chatbot(label="PIXAL λŒ€ν™”", height=600)
634
- msg = gr.Textbox(label="λ©”μ‹œμ§€", placeholder="μž…λ ₯ ν›„ Enter λ˜λŠ” 전솑 클릭")
635
- send = gr.Button("전솑")
636
- clear = gr.Button("μ΄ˆκΈ°ν™”")
637
-
638
- username = gr.Textbox(label="Hugging Face μ‚¬μš©μžλͺ…", placeholder="둜그인 λŒ€μ‹  이름 μž…λ ₯", value=os.getenv("HF_USER", "guest"))
639
- msg.submit(chat, [msg, chatbot, username], [chatbot, chatbot, msg])
640
- send.click(chat, [msg, chatbot, username], [chatbot, chatbot, msg])
641
- clear.click(lambda: None, None, chatbot, queue=False)
642
-
643
- with gr.Column(scale=1):
644
- gr.Markdown("### πŸ’Ύ μ €μž₯된 λŒ€ν™” 기둝")
645
- convo_files = gr.Dropdown(label="λŒ€ν™” 선택", choices=[])
646
- refresh_btn = gr.Button("πŸ”„ λͺ©λ‘ μƒˆλ‘œκ³ μΉ¨")
647
- load_btn = gr.Button("뢈러였기")
648
-
649
- def refresh_list(user):
650
- if not user: return gr.Dropdown.update(choices=[])
651
- return gr.Dropdown.update(choices=[x[1] for x in list_conversations(user)])
652
-
653
- refresh_btn.click(refresh_list, [username], convo_files)
654
- load_btn.click(lambda f: load_conversation(f), [convo_files], chatbot)
655
-
656
- if __name__ == "__main__":
657
- demo.launch(server_name="0.0.0.0", server_port=7860)
658
-
659
- '''
 
1
+ import os, json, pickle, datetime, requests, re, gradio as gr
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2
  from typing import Optional, List, Dict, Any
 
 
 
 
3
  from requests.adapters import HTTPAdapter, Retry
4
  from langchain.llms.base import LLM
5
+ from langchain.agents import initialize_agent, AgentType, load_tools
6
+ from langchain.tools import Tool
7
+ from langchain.memory import ConversationBufferMemory
8
+ from langchain_experimental.tools.python.tool import PythonREPLTool
9
+ from langchain_community.retrievers import WikipediaRetriever
10
+ from langchain.tools.retriever import create_retriever_tool
11
+ from langchain_community.tools.shell.tool import ShellTool
12
+ from langchain.tools import YouTubeSearchTool
13
 
14
+ # ──────────────────────────────
15
+ # βœ… GitHubModelLLM (κ·ΈλŒ€λ‘œ μœ μ§€)
16
+ # ──────────────────────────────
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
17
  class GitHubModelLLM(LLM):
 
18
  model: str = "openai/gpt-4.1"
19
  endpoint: str = "https://models.github.ai/inference"
20
+ token: Optional[str] = os.environ.get("token")
21
  system_prompt: Optional[str] = "λ„ˆλŠ” PIXAL(Primary Interactive X-ternal Assistant with multi Language)이야.λ„ˆμ˜ κ°œλ°œμžλŠ” μ •μ„±μœ€ μ΄λΌλŠ” 6ν•™λ…„ 파이썬 ν”„λ‘œκ·Έλž˜λ¨Έμ•Ό."
22
  request_timeout: float = 30.0
23
  max_retries: int = 2
 
31
  token = self.token or os.getenv("GITHUB_TOKEN") or os.getenv("token")
32
  if not token:
33
  raise ValueError("❌ GitHub token이 μ„€μ •λ˜μ§€ μ•Šμ•˜μŠ΅λ‹ˆλ‹€.")
 
34
  session = requests.Session()
35
  retries = Retry(total=self.max_retries, backoff_factor=self.backoff_factor,
36
  status_forcelist=[429, 500, 502, 503, 504])
 
44
  return resp.json()
45
 
46
  def _call(self, prompt: str, stop: Optional[List[str]] = None, **kwargs) -> str:
47
+ body = {"model": self.model, "messages": []}
 
 
 
48
  if self.system_prompt:
49
  body["messages"].append({"role": "system", "content": self.system_prompt})
50
  body["messages"].append({"role": "user", "content": prompt})
 
 
 
 
51
  if stop:
52
  body["stop"] = stop
 
53
  res = self._post_chat(body)
54
  msg = res.get("choices", [{}])[0].get("message", {})
55
  return msg.get("content") or json.dumps(msg.get("function_call", {}))
56
 
 
 
 
 
57
  # ──────────────────────────────
58
+ # βœ… HuggingFace API (ν”„λ‘œν•„)
59
  # ──────────────────────────────
60
def get_hf_userinfo(hf_token: str) -> dict:
    """Resolve the Hugging Face profile (name + avatar URL) for *hf_token*.

    Any network, auth, or parsing failure degrades to an anonymous "guest"
    profile rather than raising into the UI.
    """
    fallback = {
        "name": "guest",
        "avatar": "https://huggingface.co/front/assets/huggingface_logo-noborder.svg",
    }
    try:
        resp = requests.get(
            "https://huggingface.co/api/whoami-v2",
            headers={"Authorization": f"Bearer {hf_token}"},
            timeout=5,
        )
        if resp.status_code == 200:
            payload = resp.json()
            return {
                "name": payload.get("name", "guest"),
                "avatar": payload.get("avatar", fallback["avatar"]),
            }
    except Exception:
        pass
    return fallback
 
 
 
 
 
 
73
 
 
 
 
 
 
74
  # ──────────────────────────────
75
+ # βœ… Agent ꡬ성
76
  # ──────────────────────────────
 
 
 
 
77
  llm = GitHubModelLLM()
78
+ tools = load_tools(["ddg-search", "requests_all", "llm-math"], llm=llm, allow_dangerous_tools=True)
79
+ tools += [YouTubeSearchTool(), ShellTool(), PythonREPLTool()]
80
+ retriever = WikipediaRetriever(lang="ko")
81
+ retriever_tool = create_retriever_tool(retriever, name="wiki_search", description="μœ„ν‚€λ°±κ³Ό 검색 도ꡬ")
82
+ tools.append(retriever_tool)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
83
 
84
def time_now(_=""):
    """Return the current Korean time (fixed UTC+9) as a display string.

    The unused parameter keeps the signature compatible with LangChain's
    single-argument tool-call convention.
    """
    kst = datetime.timezone(datetime.timedelta(hours=9))
    stamp = datetime.datetime.now(kst).strftime("%Y-%m-%d %H:%M:%S")
    return f"ν˜„μž¬ μ‹œκ°: {stamp} (Asia/Seoul)"
87
+ tools.append(Tool(name="time_now", func=time_now, description="ν˜„μž¬ μ‹œκ°„μ„ λ°˜ν™˜ν•©λ‹ˆλ‹€."))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
88
 
89
# BUG FIX: `initialize_agent` selects the agent implementation via the
# `agent=` keyword; `agent_type=` is not a recognized parameter, so the
# structured-chat agent was never actually being used in this revision.
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
agent = initialize_agent(
    tools,
    llm,
    agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION,
    memory=memory,
    verbose=True,
)
 
92
 
93
  # ──────────────────────────────
94
+ # βœ… λŒ€ν™” μ €μž₯/λ‘œλ“œ
95
  # ──────────────────────────────
96
+ os.chdir(os.path.dirname(os.path.abspath(__file__)))
97
+
98
def summarize_title(history):
    """Produce a one-line title (max 50 chars) summarizing *history*.

    Uses the module-level ``llm`` to summarize the last three exchanges;
    returns a fixed fallback string when history is empty or the LLM call
    fails.
    """
    if not history:
        return "μƒˆ λŒ€ν™”"
    recent = history[-3:]
    transcript = "\n".join(f"User:{user} AI:{ai}" for user, ai in recent)
    try:
        raw = llm._call(f"이 λŒ€ν™”μ˜ 주제λ₯Ό ν•œ μ€„λ‘œ μš”μ•½ν•΄μ€˜:\n{transcript}")
        return raw.strip().replace("\n", " ")[:50]
    except Exception:
        return "μš”μ•½ μ‹€νŒ¨"
107
 
108
def save_conversation(history, hf_token):
    """Persist *history* into ``<username>.pkl``, keyed by an auto title.

    The username is resolved from *hf_token* via the Hugging Face API;
    guests (no valid token) are not persisted. Each saved entry records
    its title, last-updated timestamp, and full history.
    """
    info = get_hf_userinfo(hf_token)
    username = info["name"]
    if username.lower() == "guest":
        return
    fname = f"{username}.pkl"
    data = {}
    if os.path.exists(fname):
        # BUG FIX: a corrupt/unreadable pickle previously crashed the save
        # (this revision dropped the guard the prior revision had); fall
        # back to a fresh store instead.
        try:
            with open(fname, "rb") as f:
                data = pickle.load(f)
        except Exception:
            data = {}
    title = summarize_title(history)
    data[title] = {
        "title": title,
        "updated": datetime.datetime.now().isoformat(),
        "history": history,
    }
    with open(fname, "wb") as f:
        pickle.dump(data, f)
122
 
123
+ def load_conversation(hf_token, conv_title=None):
124
+ info = get_hf_userinfo(hf_token)
125
+ username = info["name"]
126
+ if username.lower() == "guest":
127
  return []
128
  fname = f"{username}.pkl"
129
  if not os.path.exists(fname):
 
133
  if conv_title and conv_title in data:
134
  return data[conv_title]["history"]
135
  elif data:
 
136
  latest = max(data.values(), key=lambda x: x["updated"])
137
  return latest["history"]
138
  return []
139
 
140
def refresh_conversation_list(hf_token):
    """Return a ``gr.update`` repopulating the saved-conversation dropdown.

    Resolves the username from *hf_token*; guests and users with no saved
    file get an empty dropdown. Titles are listed newest-first and the
    first one is pre-selected.
    """
    info = get_hf_userinfo(hf_token)
    username = info["name"]
    if username.lower() == "guest":
        return gr.update(choices=[], value=None)
    fname = f"{username}.pkl"
    if not os.path.exists(fname):
        return gr.update(choices=[], value=None)
    # BUG FIX: an unreadable pickle previously raised into the UI handler
    # (this revision dropped the try/except the prior revision had);
    # degrade to an empty dropdown instead.
    try:
        with open(fname, "rb") as f:
            data = pickle.load(f)
    except Exception:
        return gr.update(choices=[], value=None)
    titles = sorted(data.keys(), reverse=True)
    return gr.update(choices=titles, value=titles[0] if titles else None)
152
+
153
+ # ──────────────────────────────
154
+ # βœ… Chat ν•¨μˆ˜
155
+ # ──────────────────────────────
156
+ def chat(message, history,hf_token):
157
  try:
158
+ raw_response = agent.invoke(message)
 
 
 
 
 
 
 
 
 
 
159
  text = str(raw_response)
160
 
161
  # JSON ν˜•μ‹ 응닡 νŒŒμ‹±
 
178
 
179
  # 기둝 μΆ”κ°€ 및 μ¦‰μ‹œ μ €μž₯
180
  history = history + [(message, output)]
181
+ save_conversation(history, hf_token)
182
  return history, history, ""
183
 
 
 
184
  # ──────────────────────────────
185
+ # βœ… Gradio UI (ChatGPT μŠ€νƒ€μΌ)
186
  # ──────────────────────────────
187
+ with gr.Blocks(theme=gr.themes.Soft(), title="PIXAL Assistant (HuggingFace OAuth)") as demo:
188
+ with gr.Row(elem_id="header", style="background:#f5f5f5;padding:12px;border-bottom:1px solid #ddd;align-items:center;"):
189
+ gr.HTML("<h2 style='margin:0;padding-left:12px;'>πŸ€– PIXAL Assistant</h2>")
190
+ user_avatar = gr.Image(show_label=False, width=40, height=40, elem_id="avatar")
191
+ user_name = gr.Markdown("둜그인 ν•„μš”", elem_id="username", elem_classes="text-right")
192
+ login_btn = gr.LoginButton(label="πŸ” 둜그인", elem_id="login-btn")
193
+ hf_token = gr.State("")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
194
 
195
+ def on_login(token):
196
+ info = get_hf_userinfo(token)
197
+ return token, info["avatar"], f"**{info['name']}**"
198
 
199
+ login_btn.login(on_login, None, [hf_token, user_avatar, user_name])
 
 
 
 
 
 
 
 
 
 
 
 
 
200
 
201
  with gr.Row():
202
+ with gr.Column(scale=3):
203
+ chatbot = gr.Chatbot(label=None, height=600, render_markdown=True)
204
+ msg = gr.Textbox(placeholder="λ©”μ‹œμ§€λ₯Ό μž…λ ₯ν•˜μ„Έμš”...", show_label=False)
205
+ send = gr.Button("전솑", variant="primary")
206
+ clear = gr.Button("🧹 μ΄ˆκΈ°ν™”")
207
 
208
  msg.submit(chat, [msg, chatbot, hf_token], [chatbot, chatbot, msg])
209
  send.click(chat, [msg, chatbot, hf_token], [chatbot, chatbot, msg])
210
  clear.click(lambda: None, None, chatbot, queue=False)
211
 
212
  with gr.Column(scale=1):
213
+ gr.Markdown("### πŸ’Ύ μ €μž₯된 λŒ€ν™”")
214
+ convo_list = gr.Dropdown(label="λŒ€ν™” 선택", choices=[])
215
+ refresh_btn = gr.Button("πŸ”„ μƒˆλ‘œκ³ μΉ¨")
216
+ load_btn = gr.Button("πŸ“‚ 뢈러였기")
217
 
218
+ refresh_btn.click(refresh_conversation_list, [hf_token], convo_list)
219
+ load_btn.click(load_conversation, [hf_token, convo_list], chatbot)
220
 
221
  if __name__ == "__main__":
222
  demo.launch(server_name="0.0.0.0", server_port=7860)