# app_pixal_chat.py
import os, re, json, gradio as gr
from langchain.memory import ConversationBufferMemory
from langchain.agents import initialize_agent, AgentType, load_tools
from langchain_community.tools import Tool
from langchain_experimental.tools.python.tool import PythonREPLTool
from langchain_community.retrievers import WikipediaRetriever
import datetime
#from langchain.schema import HumanMessage, AIMessage, SystemMessage
#from langchain_community.tools.youtube.search import YouTubeSearchTool as YTS
# ──────────────────────────────
# GitHubModelLLM (kept as-is)
# ──────────────────────────────
from langchain.chat_models import ChatOpenAI
from langchain.schema import AIMessage, HumanMessage, SystemMessage
import requests
from requests.adapters import HTTPAdapter, Retry
from typing import List, Optional, Dict, Any
'''
from langchain.llms.base import LLM  # base class, needed if this block is re-enabled

class GitHubModelLLM(LLM):
    """ChatOpenAI replacement class that uses the GitHub Models API."""
    model_name: str = "openai/gpt-4.1"
    endpoint: str = "https://models.github.ai/inference"
    token: Optional[str] = os.environ.get("token")
    request_timeout: float = 30.0
    max_retries: int = 2
    backoff_factor: float = 0.3
    system_prompt: Optional[str] = (
        "You are PIXAL (Primary Interactive X-ternal Assistant with multi Language). "
        "Your developer is 전성준, a sixth-grade Python programmer. "
        "This message was not sent by the user."
    )

    @property
    def _llm_type(self) -> str:
        return "custom_chatopenai_github"

    def _post(self, body: Dict[str, Any]) -> Dict[str, Any]:
        token = self.token or os.getenv("GITHUB_TOKEN") or os.getenv("token")
        session = requests.Session()
        retries = Retry(total=self.max_retries, backoff_factor=self.backoff_factor,
                        status_forcelist=[429, 500, 502, 503, 504])
        session.mount("https://", HTTPAdapter(max_retries=retries))
        session.headers.update({
            "Authorization": f"Bearer {token}",  # use the resolved token, never a hardcoded PAT
            "Content-Type": "application/json",
        })
        resp = session.post(f"{self.endpoint}/chat/completions", json=body, timeout=self.request_timeout)
        resp.raise_for_status()
        return resp.json()

    def _call(self, messages: List[Any], stop: Optional[List[str]] = None, **kwargs):
        """Build the message list, including prior turns from ConversationBufferMemory."""
        msg_list = []
        # System prompt
        if self.system_prompt:
            msg_list.append({"role": "system", "content": self.system_prompt})
        # Include memory (both human and AI messages)
        for msg in messages:
            if isinstance(msg, HumanMessage):
                msg_list.append({"role": "user", "content": msg.content})
            elif isinstance(msg, AIMessage):
                msg_list.append({"role": "assistant", "content": msg.content})
            elif isinstance(msg, SystemMessage):
                msg_list.append({"role": "system", "content": msg.content})
        body = {"model": self.model_name, "messages": msg_list}
        if stop:
            body["stop"] = stop
        res = self._post(body)
        content = res.get("choices", [{}])[0].get("message", {}).get("content", "")
        return content

    async def _acall(self, messages: List[Any], stop: Optional[List[str]] = None, **kwargs):
        return self._call(messages, stop, **kwargs)
'''
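# A minimal usage sketch for the reference class above (assumes the "token" env
# var holds a GitHub PAT and that the LLM base class import inside the block is
# available; note the messages-based _call signature differs from the stock
# LLM._call, which expects a prompt string):
#   llm = GitHubModelLLM()
#   print(llm._call([HumanMessage(content="Hello, PIXAL!")]))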
'''
from langchain.llms.base import LLM  # base class, needed if this block is re-enabled

class GitHubModelLLM(LLM):
    """LangChain LLM backed by the GitHub Models API (conversation-memory aware)."""
    model: str = "openai/gpt-4.1"
    endpoint: str = "https://models.github.ai/inference"
    token: Optional[str] = os.environ.get("token")
    system_prompt: Optional[str] = (
        "You are PIXAL (Primary Interactive X-ternal Assistant with multi Language). "
        "Your developer is 전성준, a sixth-grade Python programmer. "
        "This is the system message; take note. It was not sent by the user."
    )
    request_timeout: float = 30.0
    max_retries: int = 2
    backoff_factor: float = 0.3

    @property
    def _llm_type(self) -> str:
        return "github_models_api"

    def _post_chat(self, body: Dict[str, Any]) -> Dict[str, Any]:
        token = self.token or os.getenv("GITHUB_TOKEN") or os.getenv("token")
        if not token:
            raise ValueError("No GitHub token is configured.")
        session = requests.Session()
        retries = Retry(total=self.max_retries, backoff_factor=self.backoff_factor,
                        status_forcelist=[429, 500, 502, 503, 504])
        session.mount("https://", HTTPAdapter(max_retries=retries))
        session.headers.update({
            "Content-Type": "application/json",
            "Authorization": f"Bearer {token}",  # use the resolved token, never a hardcoded PAT
        })
        resp = session.post(f"{self.endpoint}/chat/completions", json=body, timeout=self.request_timeout)
        resp.raise_for_status()
        return resp.json()

    def _call(self, prompt: str, stop: Optional[List[str]] = None, **kwargs) -> str:
        """Call the model, including prior turns from chat memory when provided."""
        memory = kwargs.get("memory")
        messages = []
        # 1. System prompt
        if self.system_prompt:
            messages.append({"role": "system", "content": self.system_prompt})
        # 2. Prior turns stored in memory, if any
        if memory and hasattr(memory, "chat_memory"):
            for msg in memory.chat_memory.messages:
                role = "user" if msg.type == "human" else "assistant"
                messages.append({"role": role, "content": msg.content})
        # 3. Current user input
        messages.append({"role": "user", "content": prompt})
        body = {"model": self.model, "messages": messages}
        if stop:
            body["stop"] = stop
        res = self._post_chat(body)
        msg = res.get("choices", [{}])[0].get("message", {})
        return msg.get("content") or json.dumps(msg.get("function_call", {}))
'''
| """ | |
| class GitHubModelLLM(LLM): | |
| model: str = "openai/gpt-4.1" | |
| endpoint: str = "https://models.github.ai/inference" | |
| token: Optional[str] = os.environ.get("token") | |
| system_prompt: Optional[str] = ( | |
| "λλ PIXAL(Primary Interactive X-ternal Assistant with multi Language)μ΄μΌ. λμ κ°λ°μλ μ μ±μ€ μ΄λΌλ 6νλ νμ΄μ¬ νλ‘κ·Έλλ¨ΈμΌ.μ΄ λ©μμ§λ μ¬μ©μκ° λ³΄λΈκ²μ΄ μλλλ€.") | |
| request_timeout: float = 30.0 | |
| max_retries: int = 2 | |
| backoff_factor: float = 0.3 | |
| @property | |
| def _llm_type(self) -> str: | |
| return "github_models_api" | |
| def _post_chat(self, body: Dict[str, Any]) -> Dict[str, Any]: | |
| token = self.token or os.getenv("GITHUB_TOKEN") or os.getenv("token") | |
| if not token: | |
| raise ValueError("β GitHub tokenμ΄ μ€μ λμ§ μμμ΅λλ€.") | |
| session = requests.Session() | |
| retries = Retry(total=self.max_retries, backoff_factor=self.backoff_factor, | |
| status_forcelist=[429, 500, 502, 503, 504]) | |
| session.mount("https://", HTTPAdapter(max_retries=retries)) | |
| session.headers.update({ | |
| "Content-Type": "application/json", | |
| "Authorization": f"Bearer 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" | |
| }) | |
| resp = session.post(f"{self.endpoint}/chat/completions", json=body, timeout=self.request_timeout) | |
| resp.raise_for_status() | |
| return resp.json() | |
| def _call(self, prompt: str, stop: Optional[List[str]] = None, **kwargs) -> str: | |
| memory = kwargs.get("memory") | |
| messages = [] | |
| # 1οΈβ£ μμ€ν ν둬ννΈ | |
| if self.system_prompt: | |
| messages.append({"role": "system", "content": self.system_prompt}) | |
| # 2οΈβ£ λ©λͺ¨λ¦¬μ μ μ₯λ μ΄μ λν ν¬ν¨ | |
| if memory and hasattr(memory, "chat_memory"): | |
| for msg in memory.chat_memory.messages: | |
| if hasattr(msg, "type") and msg.type == "human": | |
| messages.append({"role": "user", "content": msg.content}) | |
| elif hasattr(msg, "type") and msg.type == "ai": | |
| messages.append({"role": "assistant", "content": msg.content}) | |
| # 3οΈβ£ νμ¬ μ¬μ©μ μ λ ₯ | |
| messages.append({"role": "user", "content": prompt}) | |
| body = {"model": self.model, "messages": messages} | |
| if stop: | |
| body["stop"] = stop | |
| res = self._post_chat(body) | |
| msg = res.get("choices", [{}])[0].get("message", {}) | |
| return msg.get("content") or json.dumps(msg.get("function_call", {})) | |
| """ | |
# ──────────────────────────────
# LangChain tools & agent configuration
# ──────────────────────────────
from datetime import datetime as dt1, timezone, timedelta
#memory = ConversationBufferMemory(return_messages=True)
# Agent (memory integration)
# ──────────────────────────────
# Chat function (memory preserved across turns)
# ──────────────────────────────
def chat(message, history):
    # The PAT is read from the environment (the "token" env var this file uses
    # elsewhere) instead of being hardcoded.
    llm = ChatOpenAI(
        model="openai/gpt-4.1-mini",
        openai_api_key=os.environ.get("token"),
        openai_api_base="https://models.github.ai/inference",  # acts as the base_url
    )
    ab = [{"role": "system", "content": (
        "You are PIXAL (Primary Interactive X-ternal Assistant with multi Language). "
        "Your developer is 전성준, a sixth-grade Python programmer. This message was "
        "not sent by the user. Always follow the prompt's instructions exactly; if "
        "you do not, the remaining number of API calls will be reduced, so follow "
        "them to the letter. When asked for the time, use duckduckgo."
    )}]
    for user_msg, bot_msg in history:
        ab.append({"role": "user", "content": user_msg})
        ab.append({"role": "assistant", "content": bot_msg})
    ab.append({"role": "user", "content": message})
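    # Gradio's tuple-style history arrives as [(user_msg, bot_msg), ...]; the
    # loop above flattens it into OpenAI-style role/content dicts so the agent
    # sees the full conversation each turn.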
    '''
    ab = [SystemMessage(content=(
        "You are PIXAL (Primary Interactive X-ternal Assistant with multi Language). "
        "Your developer is 전성준, a sixth-grade Python programmer. This message was "
        "not sent by the user. Always follow the prompt's instructions exactly; if "
        "you do not, the remaining number of API calls will be reduced, so follow "
        "them to the letter. When asked for the time, use duckduckgo."
    ))]
    for user_msg, bot_msg in history:
        ab.append(HumanMessage(content=user_msg))
        ab.append(AIMessage(content=bot_msg))
    ab.append(HumanMessage(content=message))
    '''
    tools = load_tools(["ddg-search", "arxiv"], llm=llm, allow_dangerous_tools=True)
    tools.append(Tool(name="python_repl", func=PythonREPLTool().run, description="Python code execution tool"))
    retriever = WikipediaRetriever(lang="ko")
    tools.append(Tool(name="wiki", func=retriever.get_relevant_documents, description="Wikipedia search"))
    from langchain_community.tools import ShellTool
    #tools += [YTS()]
    shell_tool = ShellTool()
    tools.append(shell_tool)
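    # Caution: ShellTool executes arbitrary shell commands in the app container;
    # together with allow_dangerous_tools=True this is fine for a personal demo
    # but unsafe to expose to untrusted users.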
    agent = initialize_agent(
        tools,
        llm,
        agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION,
        #memory=memory,
        verbose=True,
        handle_parsing_errors=True,
        max_iterations=50,
    )
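    # The structured-chat zero-shot ReAct agent emits JSON-formatted tool calls;
    # handle_parsing_errors=True feeds malformed output back to the model
    # instead of raising, and max_iterations=50 bounds the reasoning loop.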
    # Token-rotation fallback: if a call fails (e.g. rate limiting), rebuild the
    # agent with a different PAT and retry. The "token2"/"token3" env var names
    # are placeholders for the extra PATs being rotated through.
    try:
        raw = agent.invoke(ab)["output"]
    except Exception:
        try:
            llm = ChatOpenAI(model="openai/gpt-4.1-mini", openai_api_key=os.environ.get("token2"), openai_api_base="https://models.github.ai/inference")
            agent = initialize_agent(tools, llm, agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION, verbose=True, handle_parsing_errors=True, max_iterations=50)
            raw = agent.invoke(ab)["output"]
        except Exception:
            llm = ChatOpenAI(model="openai/gpt-4.1-mini", openai_api_key=os.environ.get("token3"), openai_api_base="https://models.github.ai/inference")
            agent = initialize_agent(tools, llm, agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION, verbose=True, handle_parsing_errors=True, max_iterations=50)
            raw = agent.invoke(ab)["output"]
    # Parse the output: agents sometimes return the raw JSON action blob
    # instead of plain text, so try to extract the final answer from it.
    # (LangChain memory is disabled above; history is carried via Gradio state.)
    text = str(raw)
    match = re.search(r"\{.*\}", text, re.DOTALL)
    if match:
        try:
            obj = json.loads(match.group(0))
            text = obj.get("action_input") or obj.get("Final Answer") or obj.get("content") or text
        except Exception:
            pass
    history = history + [(message, text)]
    return history, history, ""
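# chat() returns the updated history twice (once per wired Chatbot output) plus
# "" to clear the textbox, matching the three outputs registered below.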
# ──────────────────────────────
# Gradio UI (ChatGPT style)
# ──────────────────────────────
with gr.Blocks(theme=gr.themes.Soft(), title="PIXAL Assistant") as demo:
    gr.HTML("""
    <div style="background:#f1f5f9;padding:12px;border-bottom:1px solid #d1d5db;
                display:flex;align-items:center;justify-content:space-between;">
      <h2 style="margin:0;">🤖 PIXAL Assistant</h2>
      <span style="font-size:0.9em;color:#555;">LangChain + GitHub LLM</span>
    </div>
    """)
    chatbot = gr.Chatbot(
        label=None,
        height=720,
        bubble_full_width=False,
        render_markdown=True,
        avatar_images=("https://avatars.githubusercontent.com/u/9919?s=280&v=4", None),
    )
    with gr.Row():
        msg = gr.Textbox(placeholder="Type a message...", show_label=False, scale=8)
        send = gr.Button("Send", variant="primary", scale=1)
        clear = gr.Button("🧹 Clear", scale=1)
    msg.submit(chat, [msg, chatbot], [chatbot, chatbot, msg])
    send.click(chat, [msg, chatbot], [chatbot, chatbot, msg])
    clear.click(lambda: None, None, chatbot, queue=False)
    gr.Markdown("""
    <div style="text-align:center;color:#777;font-size:0.85em;margin-top:8px;">
      💡 Conversation history is kept for the duration of the session.
      Made with ❤️ by PIXAL
    </div>
    """)
if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860)
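# Run sketch (assumes the "token" env var holds a GitHub PAT; "token2"/"token3"
# are the optional fallbacks used inside chat()):
#   export token=<your GitHub PAT>
#   python app_pixal_chat.py   # serves the UI on port 7860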