import os
from slack_bolt import App, Assistant, BoltContext, Say, SetSuggestedPrompts, SetStatus
from slack_bolt.adapter.socket_mode import SocketModeHandler
import logging
from slack_sdk import WebClient
from slack_sdk.errors import SlackApiError
from transformers import ReactCodeAgent, HfApiEngine
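# Environment variables this app reads:
#   HF_TOKEN         - Hugging Face API token used by HfApiEngine
#   SLACK_BOT_TOKEN  - Slack bot token (xoxb-...) used to initialize the Bolt app
#   SLACK_APP_TOKEN  - Slack app-level token (xapp-...) used by the Socket Mode entry point at the bottom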
logging.basicConfig(level=logging.INFO)
logging.info('App loading again..\n\n')
# Set up the LLM engine backed by the Hugging Face Inference API
hf_token = os.environ.get("HF_TOKEN")
llm_engine = HfApiEngine(model="Qwen/Qwen2.5-72B-Instruct", token=hf_token)
# Initialize the ReAct code agent with the base toolbox; the code it generates may
# additionally import requests, bs4, and html5lib for fetching and parsing web pages
agent = ReactCodeAgent(
    tools=[],
    llm_engine=llm_engine,
    additional_authorized_imports=["requests", "bs4", "html5lib"],
    add_base_tools=True,
)
logging.info('App initialized \n')
# Initialize your Bolt app with your bot token and create the Assistant middleware
app = App(token=os.environ.get("SLACK_BOT_TOKEN"))
assistant = Assistant()
# This listener is invoked when a human user opens an assistant thread
@assistant.thread_started
def start_assistant_thread(say: Say, set_suggested_prompts: SetSuggestedPrompts):
    # Send the first reply to the human who started a chat with your app's assistant bot
    say(":wave: Hi, how can I help you today?")
    # Setting suggested prompts is optional
    set_suggested_prompts(
        prompts=[
            # If the suggested prompt is long, you can use {"title": "short one to display", "message": "full prompt"} instead
            {
                "title": "Summarize latest post from Slack engineering blog",
                "message": "Read blog post https://slack.engineering/slack-audit-logs-and-anomalies/ and summarize it in 200 words",
            },
        ],
    )
# This listener is invoked when the human user sends a reply in the assistant thread
@assistant.user_message
def respond_in_assistant_thread(
    payload: dict,
    logger: logging.Logger,
    context: BoltContext,
    set_status: SetStatus,
    client: WebClient,
    say: Say,
):
    try:
        # Tell the human user that the assistant bot acknowledged the request and is working on it
        set_status("is hard at work...")
        # Pull the plain text of the user's message out of the event payload
        query = payload["blocks"][0]["elements"][0]["elements"][0]["text"]
        # Run the agent on the latest prompt; the extra `url` keyword is forwarded to the
        # agent as an additional argument for the task
        agent_gen = agent.run(task=query, stream=True, url="https://slack.engineering/slack-audit-logs-and-anomalies/")
        # Stream intermediate reasoning steps and the final answer back into the thread
        for val in agent_gen:
            if "final_answer" in val:
                say(f":tada: *Final Answer* : {val['final_answer']}")
            elif "rationale" in val:
                say(f":thinking_face: {val['rationale']}")
    except Exception as e:
        logger.exception(f"Failed to respond to an inquiry: {e}")
        # Don't forget to send a message describing the error;
        # without it, the "is typing..." status won't be cleared and the end user can't continue the chat
        say(f":warning: Sorry, something went wrong while processing your request (error: {e})")
# Enable this assistant middleware in your Bolt app
app.use(assistant)
# # Start your app
# if __name__ == "__main__":
#     logging.info('Listening on socket mode \n')
#     SocketModeHandler(app, os.environ["SLACK_APP_TOKEN"]).start()
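# Note: the Socket Mode entry point above is commented out. To run this file directly,
# uncomment it and set SLACK_APP_TOKEN to an app-level token with the connections:write scope.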