import logging
import os

import gradio as gr
from fastapi import FastAPI, Request
from fastapi.responses import RedirectResponse
from slack_bolt import App, Assistant, BoltContext, Say, SetStatus, SetSuggestedPrompts
from slack_bolt.adapter.fastapi import SlackRequestHandler
from slack_sdk import WebClient
from transformers import (
    HfApiEngine,
    ReactCodeAgent,
    stream_to_gradio,
)

assistant = Assistant()
logging.basicConfig(level=logging.INFO)

# Set up the LLM engine backed by the Hugging Face Inference API
hf_token = os.environ.get("HF_TOKEN")
llm_engine = HfApiEngine(model="Qwen/Qwen2.5-72B-Instruct", token=hf_token)

# Initialize the agent with the base toolbox and a few authorized imports
agent = ReactCodeAgent(
    tools=[],
    llm_engine=llm_engine,
    additional_authorized_imports=["requests", "bs4", "html5lib", "json"],
    add_base_tools=True,
)


# This listener is invoked when a human user opens an assistant thread
@assistant.thread_started
def start_assistant_thread(say: Say, set_suggested_prompts: SetSuggestedPrompts):
    # Send the first reply to the human who started a chat with your app's assistant bot
    say(":wave: Hi, how can I help you today?")

    # Setting suggested prompts is optional
    set_suggested_prompts(
        prompts=[
            # For a long suggested prompt, use {"title": "short text to display", "message": "full prompt"}
            {
                "title": "Get latest XKCD comic in French",
                "message": "Get latest xkcd comic and translate it to french. Output title, content and link on separate lines.",
            }
        ],
    )


# This listener is invoked when the human user sends a reply in the assistant thread
@assistant.user_message
def respond_in_assistant_thread(
    payload: dict,
    logger: logging.Logger,
    context: BoltContext,
    set_status: SetStatus,
    client: WebClient,
    say: Say,
):
    try:
        # Tell the human user the assistant bot acknowledges the request and is working on it
        set_status("is hard at work...")
        # Extract the plain text of the user's message from the rich_text blocks
        query = payload["blocks"][0]["elements"][0]["elements"][0]["text"]
        # Run the agent on the user's query, streaming intermediate steps back to the thread
        agent_gen = agent.run(task=query, stream=True, xkcd_url="https://xkcd.com/info.0.json")
        for val in agent_gen:
            if "final_answer" in val:
                say(f":tada: *Final Answer* :\n {val['final_answer']}")
            elif "rationale" in val:
                say(f":thinking_face: {val['rationale']} \n\n :hourglass: Task is not finished yet!\n\n")
                set_status("is hard at work...")
    except Exception as e:
        logger.exception(f"Failed to respond to an inquiry: {e}")
        # Don't forget to send a message reporting the error.
        # Without this, the "is typing..." status won't be cleared,
        # so the end user is unable to continue the chat.
        say(f":warning: Sorry, something went wrong while processing your request (error: {e})")


# Enable this assistant middleware in your Bolt app
slack_app = App(
    token=os.environ.get("SLACK_BOT_TOKEN"),
    signing_secret=os.environ.get("SLACK_SIGNING_SECRET"),
)
slack_app.use(assistant)

app_handler = SlackRequestHandler(slack_app)
app = FastAPI()


@app.post("/slack/events")
async def endpoint(req: Request):
    logging.info("received Slack event")
    return await app_handler.handle(req)


#### Gradio
def interact_with_agent(task):
    messages = []
    messages.append(gr.ChatMessage(role="user", content=task))
    yield messages
    for msg in stream_to_gradio(agent, task):
        messages.append(msg)
        yield messages + [
            gr.ChatMessage(role="assistant", content="⏳ Task not finished yet!")
        ]
    yield messages


with gr.Blocks() as io:
    text_input = gr.Textbox(
        lines=1,
        label="Chat Message",
        value="Get latest xkcd comic and translate it to French",
    )
    submit = gr.Button("Run agent!")
    chatbot = gr.Chatbot(
        label="Agent",
        type="messages",
        avatar_images=(
            None,
            "https://em-content.zobj.net/source/twitter/53/robot-face_1f916.png",
        ),
    )
    submit.click(interact_with_agent, [text_input], [chatbot])

gr.mount_gradio_app(app, io, path="/gradio")


@app.get("/")
def root(request: Request):
    logging.info(f"Redirecting to : {request.base_url}gradio")
    return RedirectResponse(f"{request.base_url}gradio", status_code=307)
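

# --- Running locally (a minimal sketch, not part of the original snippet) ---
# This assumes the file is saved as `app.py`, that `uvicorn` is installed, and
# that SLACK_BOT_TOKEN, SLACK_SIGNING_SECRET, and HF_TOKEN are set in the
# environment. It serves the FastAPI app (and the Gradio UI mounted at
# /gradio), equivalent to running:
#
#   uvicorn app:app --host 0.0.0.0 --port 7860
#
# Slack's Events API request URL must then point at
# https://<your-host>/slack/events for the endpoint above to receive events.
if __name__ == "__main__":
    import uvicorn

    # Port 7860 is an assumption (the Hugging Face Spaces default); use any free port.
    uvicorn.run(app, host="0.0.0.0", port=7860)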