diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..0993457777d1653cd1f0e0f48b530c059d1672c4 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,15 @@ +FROM python:3.9-slim + +WORKDIR /code + +COPY ./requirements.txt /code/requirements.txt +COPY ./api.py /code/api.py +COPY ./json_parser.py /code/json_parser.py +COPY ./logger_config.py /code/logger_config.py +COPY ./response_formatter.py /code/response_formatter.py + +RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt + +EXPOSE 7860 + +CMD ["uvicorn", "api:app", "--host", "0.0.0.0", "--port", "7860"] \ No newline at end of file diff --git a/api.py b/api.py new file mode 100644 index 0000000000000000000000000000000000000000..a927ba88bb95ad60f1f750a39e5ac01a6cff1571 --- /dev/null +++ b/api.py @@ -0,0 +1,346 @@ +from fastapi import FastAPI, HTTPException, Request +from fastapi.middleware.cors import CORSMiddleware +from typing import Dict, List, Optional, Union, Any +from pydantic import BaseModel, Field +from datetime import datetime +import logging +import json +import os +from dotenv import load_dotenv +from dify_client_python.dify_client import models +from sse_starlette.sse import EventSourceResponse +import httpx +from json_parser import SSEParser +from logger_config import setup_logger +from fastapi.responses import StreamingResponse +from fastapi.responses import JSONResponse +from response_formatter import ResponseFormatter +import traceback + +# Load environment variables +load_dotenv() + +# Configure logging +logging.basicConfig( + level=logging.INFO, + format='%(asctime)s - %(name)s - %(levelname)s - %(message)s' +) +logger = logging.getLogger(__name__) + +class AgentOutput(BaseModel): + """Structured output from agent processing""" + thought_content: str + observation: Optional[str] + tool_outputs: List[Dict] + citations: List[Dict] + metadata: Dict + raw_response: str + +class AgentRequest(BaseModel): + """Enhanced request model with additional parameters""" + query: str + conversation_id: Optional[str] = None + stream: bool = True + inputs: Dict = {} + files: List = [] + user: str = "default_user" + response_mode: str = "streaming" + +class AgentProcessor: + def __init__(self, api_key: str): + self.api_key = api_key + self.api_base = "https://rag-engine.go-yamamoto.com/v1" + self.formatter = ResponseFormatter() + self.client = httpx.AsyncClient(timeout=60.0) + self.logger = setup_logger("agent_processor") + + async def log_request_details( + self, + request: AgentRequest, + start_time: datetime + ) -> None: + """Log detailed request information""" + self.logger.debug( + "Request details: \n" + f"Query: {request.query}\n" + f"User: {request.user}\n" + f"Conversation ID: {request.conversation_id}\n" + f"Stream mode: {request.stream}\n" + f"Start time: {start_time}\n" + f"Inputs: {request.inputs}\n" + f"Files: {len(request.files)} files attached" + ) + + async def log_error( + self, + error: Exception, + context: Optional[Dict] = None + ) -> None: + """Log detailed error information""" + error_msg = ( + f"Error type: {type(error).__name__}\n" + f"Error message: {str(error)}\n" + f"Stack trace:\n{traceback.format_exc()}\n" + ) + if context: + error_msg += f"Context:\n{json.dumps(context, indent=2)}" + + self.logger.error(error_msg) + + async def cleanup(self): + """Cleanup method to properly close client""" + await self.client.aclose() + + async def process_stream(self, request: AgentRequest): + start_time = datetime.now() + await self.log_request_details(request, 
start_time)
+
+        headers = {
+            "Authorization": f"Bearer {self.api_key}",
+            "Content-Type": "application/json",
+            "Accept": "text/event-stream"
+        }
+
+        chat_request = {
+            "query": request.query,
+            "inputs": request.inputs,
+            "response_mode": "streaming" if request.stream else "blocking",
+            "user": request.user,
+            "conversation_id": request.conversation_id,
+            "files": request.files
+        }
+
+        async def event_generator():
+            parser = SSEParser()
+            citations = []
+            metadata = {}
+
+            try:
+                async with self.client.stream(
+                    "POST",
+                    f"{self.api_base}/chat-messages",
+                    headers=headers,
+                    json=chat_request
+                ) as response:
+                    self.logger.debug(
+                        f"Stream connection established\n"
+                        f"Status: {response.status_code}\n"
+                        f"Headers: {dict(response.headers)}"
+                    )
+
+                    buffer = ""
+                    async for line in response.aiter_lines():
+                        if not line.strip():
+                            continue
+
+                        self.logger.debug(f"Raw SSE line: {line}")
+
+                        if "data:" in line:
+                            try:
+                                data = line.split("data:", 1)[1].strip()
+                                parsed = json.loads(data)
+
+                                if parsed.get("event") == "message_end":
+                                    citations = parsed.get("retriever_resources", [])
+                                    metadata = parsed.get("metadata", {})
+                                    self.logger.debug(
+                                        f"Message end event:\n"
+                                        f"Citations: {citations}\n"
+                                        f"Metadata: {metadata}"
+                                    )
+
+                                formatted = self.format_terminal_output(
+                                    parsed,
+                                    citations=citations,
+                                    metadata=metadata
+                                )
+                                if formatted:
+                                    self.logger.info(formatted)
+                            except Exception as e:
+                                await self.log_error(
+                                    e,
+                                    {"line": line, "event": "parse_data"}
+                                )
+
+                        buffer += line + "\n"
+
+                        if line.startswith("data:") or buffer.strip().endswith("}"):
+                            try:
+                                processed_response = parser.parse_sse_event(buffer)
+                                if processed_response and isinstance(processed_response, dict):
+                                    cleaned_response = self.clean_response(processed_response)
+                                    if cleaned_response:
+                                        xml_content = cleaned_response.get("content", "")
+                                        yield f"data: {xml_content}\n\n"
+                            except Exception as parse_error:
+                                await self.log_error(
+                                    parse_error,
+                                    {"buffer": buffer, "event": "process_buffer"}
+                                )
+                                error_xml = (
+                                    f"<error>"
+                                    f"{str(parse_error)}"
+                                    f"</error>"
+                                )
+                                yield f"data: {error_xml}\n\n"
+                            finally:
+                                buffer = ""
+
+            except httpx.ConnectError as e:
+                await self.log_error(e, {"event": "connection_error"})
+                error_xml = (
+                    f"<error>"
+                    f"Connection error: {str(e)}"
+                    f"</error>"
+                )
+                yield f"data: {error_xml}\n\n"
+            except Exception as e:
+                await self.log_error(e, {"event": "stream_error"})
+                error_xml = (
+                    f"<error>"
+                    f"Streaming error: {str(e)}"
+                    f"</error>"
+                )
+                yield f"data: {error_xml}\n\n"
+            finally:
+                end_time = datetime.now()
+                duration = (end_time - start_time).total_seconds()
+                self.logger.info(f"Request completed in {duration:.2f} seconds")
+
+        return StreamingResponse(
+            event_generator(),
+            media_type="text/event-stream",
+            headers={
+                "Cache-Control": "no-cache",
+                "Connection": "keep-alive",
+                "X-Accel-Buffering": "no",
+                "Access-Control-Allow-Origin": "*"
+            }
+        )
+
+    def format_terminal_output(
+        self,
+        response: Dict,
+        citations: Optional[List[Dict]] = None,
+        metadata: Optional[Dict] = None
+    ) -> Optional[str]:
+        """Format response for terminal output"""
+        event_type = response.get("event")
+
+        if event_type == "agent_thought":
+            thought = response.get("thought", "")
+            observation = response.get("observation", "")
+            terminal_output, _ = self.formatter.format_thought(
+                thought,
+                observation,
+                citations=citations,
+                metadata=metadata
+            )
+            return terminal_output
+
+        elif event_type == "agent_message":
+            message = response.get("answer", "")
+            terminal_output, _ = self.formatter.format_message(message)
+            return terminal_output
+
+        elif
event_type == "error": + error = response.get("error", "Unknown error") + terminal_output, _ = self.formatter.format_error(error) + return terminal_output + + return None + + def clean_response(self, response: Dict) -> Optional[Dict]: + """Clean and transform the response for frontend consumption""" + try: + event_type = response.get("event") + if not event_type: + return None + + # Handle different event types + if event_type == "agent_thought": + thought = response.get("thought", "") + observation = response.get("observation", "") + _, xml_output = self.formatter.format_thought(thought, observation) + return { + "type": "thought", + "content": xml_output + } + + elif event_type == "agent_message": + message = response.get("answer", "") + _, xml_output = self.formatter.format_message(message) + return { + "type": "message", + "content": xml_output + } + + elif event_type == "error": + error = response.get("error", "Unknown error") + _, xml_output = self.formatter.format_error(error) + return { + "type": "error", + "content": xml_output + } + + return None + except Exception as e: + logger.error(f"Error cleaning response: {str(e)}") + return None + +# Initialize FastAPI app +app = FastAPI() +agent_processor = None + +# Add CORS middleware +app.add_middleware( + CORSMiddleware, + allow_origins=["*"], + allow_credentials=True, + allow_methods=["*"], + allow_headers=["*"], +) + +@app.on_event("startup") +async def startup_event(): + global agent_processor + api_key = os.getenv("DIFY_API_KEY", "app-kVHTrZzEmFXEBfyXOi4rro7M") + agent_processor = AgentProcessor(api_key=api_key) + +@app.on_event("shutdown") +async def shutdown_event(): + global agent_processor + if agent_processor: + await agent_processor.cleanup() + +@app.post("/v1/agent") +async def process_agent_request(request: AgentRequest): + try: + logger.info(f"Processing agent request: {request.query}") + return await agent_processor.process_stream(request) + + except Exception as e: + logger.error(f"Error in agent request processing: {e}", exc_info=True) + raise HTTPException(status_code=500, detail=str(e)) + +@app.middleware("http") +async def error_handling_middleware(request: Request, call_next): + try: + response = await call_next(request) + return response + except Exception as e: + logger.error(f"Unhandled error: {str(e)}", exc_info=True) + return JSONResponse( + status_code=500, + content={"error": "Internal server error occurred"} + ) + +# Add host and port parameters to the launch +if __name__ == "__main__": + import uvicorn + uvicorn.run( + "api:app", + host="0.0.0.0", + port=8224, + reload=True + ) \ No newline at end of file diff --git a/api_docs.md b/api_docs.md new file mode 100644 index 0000000000000000000000000000000000000000..85a9093c578fed4d00c69f345a1b0a7acd1755d7 --- /dev/null +++ b/api_docs.md @@ -0,0 +1,154 @@ +POST +/chat-messages +Send Chat Message +Send a request to the chat application. + +Request Body +Name +query +Type +string +Description +User Input/Question content + +Name +inputs +Type +object +Description +Allows the entry of various variable values defined by the App. The inputs parameter contains multiple key/value pairs, with each key corresponding to a specific variable and each value being the specific value for that variable. Default {} + +Name +response_mode +Type +string +Description +The mode of response return, supporting: + +streaming Streaming mode (recommended), implements a typewriter-like output through SSE (Server-Sent Events). 
+blocking Blocking mode, returns the result after execution is complete. (The request may be interrupted if the process takes long.) Due to Cloudflare restrictions, the request will be interrupted without a return after 100 seconds. Note: blocking mode is not supported in Agent Assistant mode.
+Name
+user
+Type
+string
+Description
+User identifier, used to define the identity of the end-user for retrieval and statistics. Should be uniquely defined by the developer within the application.
+
+Name
+conversation_id
+Type
+string
+Description
+Conversation ID. To continue the conversation based on previous chat records, pass the previous message's conversation_id.
+
+Name
+files
+Type
+array[object]
+Description
+File list, suitable for inputting files (images) combined with text understanding and answering questions, available only when the model supports Vision capability.
+
+type (string) Supported type: image (currently only the image type is supported)
+transfer_method (string) Transfer method: remote_url for an image URL / local_file for a file upload
+url (string) Image URL (when the transfer method is remote_url)
+upload_file_id (string) Uploaded file ID, which must be obtained in advance through the File Upload API (when the transfer method is local_file)
+Name
+auto_generate_name
+Type
+bool
+Description
+Auto-generate title, default is true. If set to false, asynchronous title generation can be achieved by calling the conversation rename API and setting auto_generate to true.
+
+Response
+When response_mode is blocking, returns a ChatCompletionResponse object. When response_mode is streaming, returns a ChunkChatCompletionResponse stream.
+
+ChatCompletionResponse
+Returns the complete App result, Content-Type is application/json.
+
+message_id (string) Unique message ID
+conversation_id (string) Conversation ID
+mode (string) App mode, fixed as chat
+answer (string) Complete response content
+metadata (object) Metadata
+usage (Usage) Model usage information
+retriever_resources (array[RetrieverResource]) Citation and Attribution List
+created_at (int) Message creation timestamp, e.g., 1705395332
+ChunkChatCompletionResponse
+Returns the stream chunks outputted by the App, Content-Type is text/event-stream. Each streaming chunk starts with data:, separated by two newline characters \n\n, as shown below:
+
+data: {"event": "message", "task_id": "900bbd43-dc0b-4383-a372-aa6e6c414227", "id": "663c5084-a254-4040-8ad3-51f2a3c1a77c", "answer": "Hi", "created_at": 1705398420}\n\n
+
+The structure of the streaming chunks varies depending on the event:
+
+event: message LLM returns text chunk event, i.e., the complete text is output in a chunked fashion.
+task_id (string) Task ID, used for request tracking and the below Stop Generate API
+message_id (string) Unique message ID
+conversation_id (string) Conversation ID
+answer (string) LLM returned text chunk content
+created_at (int) Creation timestamp, e.g., 1705395332
+event: agent_message LLM returns text chunk event, i.e., with Agent Assistant enabled, the complete text is output in a chunked fashion (only supported in Agent mode)
+task_id (string) Task ID, used for request tracking and the below Stop Generate API
+message_id (string) Unique message ID
+conversation_id (string) Conversation ID
+answer (string) LLM returned text chunk content
+created_at (int) Creation timestamp, e.g., 1705395332
+event: tts_message TTS audio stream event, that is, speech synthesis output. The content is an audio block in MP3 format, encoded as a base64 string. To play it, simply decode the base64 and feed it into a player. (This message is available only when auto-play is enabled)
+task_id (string) Task ID, used for request tracking and the stop response interface below
+message_id (string) Unique message ID
+audio (string) The synthesized audio, encoded as base64 text; to play it, decode the base64 and feed it into a player
+created_at (int) Creation timestamp, e.g., 1705395332
+event: tts_message_end TTS audio stream end event; receiving this event indicates the end of the audio stream.
+task_id (string) Task ID, used for request tracking and the stop response interface below
+message_id (string) Unique message ID
+audio (string) The end event has no audio, so this is an empty string
+created_at (int) Creation timestamp, e.g., 1705395332
+event: agent_thought Agent thought event; contains the thought of the LLM and the input and output of tool calls (only supported in Agent mode)
+id (string) Agent thought ID; every iteration has a unique agent thought ID
+task_id (string) Task ID, used for request tracking and the below Stop Generate API
+message_id (string) Unique message ID
+position (int) Position of the current agent thought; each message may have multiple thoughts in order.
+thought (string) What the LLM is thinking about
+observation (string) Response from tool calls
+tool (string) A list of the tools called, separated by ;
+tool_input (string) Input of tools in JSON format. Like: {"dalle3": {"prompt": "a cute cat"}}.
+created_at (int) Creation timestamp, e.g., 1705395332
+message_files (array[string]) Refer to the message_file event
+file_id (string) File ID
+conversation_id (string) Conversation ID
+event: message_file Message file event; a new file has been created by a tool
+id (string) File unique ID
+type (string) File type; only "image" is currently allowed
+belongs_to (string) Belongs to; it will only be 'assistant' here
+url (string) Remote URL of the file
+conversation_id (string) Conversation ID
+event: message_end Message end event; receiving this event means streaming has ended.
+task_id (string) Task ID, used for request tracking and the below Stop Generate API
+message_id (string) Unique message ID
+conversation_id (string) Conversation ID
+metadata (object) Metadata
+usage (Usage) Model usage information
+retriever_resources (array[RetrieverResource]) Citation and Attribution List
+event: message_replace Message content replacement event. When output content moderation is enabled, if the content is flagged, the message content will be replaced with a preset reply through this event.
+task_id (string) Task ID, used for request tracking and the below Stop Generate API
+message_id (string) Unique message ID
+conversation_id (string) Conversation ID
+answer (string) Replacement content (directly replaces all LLM reply text)
+created_at (int) Creation timestamp, e.g., 1705395332
+event: error Exceptions that occur during the streaming process will be output in the form of stream events; reception of an error event will end the stream.
+task_id (string) Task ID, used for request tracking and the below Stop Generate API
+message_id (string) Unique message ID
+status (int) HTTP status code
+code (string) Error code
+message (string) Error message
+event: ping Ping event every 10 seconds to keep the connection alive.
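+For reference, here is a minimal Python sketch of consuming this stream with httpx (the base URL, API key, and event handling below are illustrative placeholders, not part of the API itself):
+
+```python
+import json
+import httpx
+
+API_BASE = "https://api.dify.ai/v1"  # placeholder base URL
+API_KEY = "your-api-key"             # placeholder API key
+
+payload = {
+    "query": "Hello",
+    "inputs": {},
+    "response_mode": "streaming",
+    "user": "user-123",
+}
+headers = {"Authorization": f"Bearer {API_KEY}", "Accept": "text/event-stream"}
+
+with httpx.stream("POST", f"{API_BASE}/chat-messages", json=payload,
+                  headers=headers, timeout=60.0) as response:
+    for line in response.iter_lines():
+        if not line.startswith("data:"):
+            continue  # skip blank separators and non-data SSE lines
+        data = line[len("data:"):].strip()
+        if not data:
+            continue
+        chunk = json.loads(data)
+        if chunk.get("event") in ("message", "agent_message"):
+            print(chunk["answer"], end="", flush=True)
+        elif chunk.get("event") == "message_end":
+            print()  # metadata and retriever_resources are available in this chunk
+            break
+```
+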
+Errors
+404, Conversation does not exist
+400, invalid_param, abnormal parameter input
+400, app_unavailable, App configuration unavailable
+400, provider_not_initialize, no available model credential configuration
+400, provider_quota_exceeded, model invocation quota insufficient
+400, model_currently_not_support, current model unavailable
+400, completion_request_error, text generation failed
+500, internal server error
\ No newline at end of file
diff --git a/dify_client_python/.DS_Store b/dify_client_python/.DS_Store
new file mode 100644
index 0000000000000000000000000000000000000000..e14c6122a0768efcd1652ec2b67c0aae793985c4
Binary files /dev/null and b/dify_client_python/.DS_Store differ
diff --git a/dify_client_python/LICENSE b/dify_client_python/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..8c266e78184b3b04981b5f3cd44af2f1f2cb768e
--- /dev/null
+++ b/dify_client_python/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2024 haoyuhu
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/dify_client_python/MANIFEST.in b/dify_client_python/MANIFEST.in
new file mode 100644
index 0000000000000000000000000000000000000000..da331d5e5ca076fe81fb91d255dfe44d1956bbc0
--- /dev/null
+++ b/dify_client_python/MANIFEST.in
@@ -0,0 +1 @@
+recursive-include dify_client *.py
\ No newline at end of file
diff --git a/dify_client_python/README.md b/dify_client_python/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..2ff1abc6b14c1571721363a12fa6495f2b46d13f
--- /dev/null
+++ b/dify_client_python/README.md
@@ -0,0 +1,155 @@
+# dify-client-python
+
+Welcome to the `dify-client-python` repository! This Python package provides a convenient and powerful interface to
+interact with the Dify API, enabling developers to integrate a wide range of features into their applications with ease.
+
+## Main Features
+
+* **Synchronous and Asynchronous Support**: The client offers both synchronous and asynchronous methods, allowing for
+  flexible integration into various Python codebases and frameworks.
+* **Stream and Non-stream Support**: Seamlessly work with both streaming and non-streaming endpoints of the Dify API for
+  real-time and batch processing use cases.
+* **Comprehensive Endpoint Coverage**: Supporting completion, chat, workflows, feedback, file uploads, and more, the
+  client covers all available Dify API endpoints.
+
+## Installation
+
+Before using the `dify-client-python` client, you'll need to install it. You can easily install it using `pip`:
+
+```bash
+pip install dify-client-python
+```
+
+## Quick Start
+
+Here's a quick example of how you can use the Dify Client to send a chat message.
+
+```python
+import uuid
+from dify_client import Client, models
+
+# Initialize the client with your API key
+client = Client(
+    api_key="your-api-key",
+    api_base="http://localhost/v1",
+)
+user = str(uuid.uuid4())
+
+# Create a blocking chat request
+blocking_chat_req = models.ChatRequest(
+    query="Hi, dify-client-python!",
+    inputs={"city": "Beijing"},
+    user=user,
+    response_mode=models.ResponseMode.BLOCKING,
+)
+
+# Send the chat message
+chat_response = client.chat_messages(blocking_chat_req, timeout=60.)
+print(chat_response)
+
+# Create a streaming chat request
+streaming_chat_req = models.ChatRequest(
+    query="Hi, dify-client-python!",
+    inputs={"city": "Beijing"},
+    user=user,
+    response_mode=models.ResponseMode.STREAMING,
+)
+
+# Send the chat message
+for chunk in client.chat_messages(streaming_chat_req, timeout=60.):
+    print(chunk)
+```
+
+For asynchronous operations, use the `AsyncClient` in a similar fashion:
+
+```python
+import asyncio
+import uuid
+
+from dify_client import AsyncClient, models
+
+# Initialize the async client with your API key
+async_client = AsyncClient(
+    api_key="your-api-key",
+    api_base="http://localhost/v1",
+)
+
+
+# Define an asynchronous function to send a chat message with BLOCKING ResponseMode
+async def send_chat_message():
+    user = str(uuid.uuid4())
+    # Create a blocking chat request
+    blocking_chat_req = models.ChatRequest(
+        query="Hi, dify-client-python!",
+        inputs={"city": "Beijing"},
+        user=user,
+        response_mode=models.ResponseMode.BLOCKING,
+    )
+    chat_response = await async_client.achat_messages(blocking_chat_req, timeout=60.)
+    print(chat_response)
+
+
+# Define an asynchronous function to send a chat message with STREAMING ResponseMode
+async def send_chat_message_stream():
+    user = str(uuid.uuid4())
+    # Create a streaming chat request
+    streaming_chat_req = models.ChatRequest(
+        query="Hi, dify-client-python!",
+        inputs={"city": "Beijing"},
+        user=user,
+        response_mode=models.ResponseMode.STREAMING,
+    )
+    async for chunk in await async_client.achat_messages(streaming_chat_req, timeout=60.):
+        print(chunk)
+
+
+# Run both asynchronous functions inside an event loop
+async def main():
+    await asyncio.gather(send_chat_message(), send_chat_message_stream())
+
+
+asyncio.run(main())
+```
+
+## Documentation
+
+For detailed information on all the functionalities and how to use each endpoint, please refer to the official Dify API
+documentation. This will provide you with comprehensive guidance on request and response structures, error handling, and
+other important details.
+
+## Contributing
+
+Contributions are welcome! If you would like to contribute to `dify-client-python`, please feel free to open a pull
+request or an issue to discuss potential changes.
+
+## License
+
+This project is licensed under the MIT License - see the LICENSE file for details.
+ +```text +MIT License + +Copyright (c) 2024 haoyuhu + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +``` + +## Support + +If you encounter any issues or have questions regarding the usage of this client, please reach out to the Dify Client +support team. + +Happy coding! 🚀 \ No newline at end of file diff --git a/dify_client_python/build.sh b/dify_client_python/build.sh new file mode 100644 index 0000000000000000000000000000000000000000..ca1a762c9965b46cfb4d137e6e845d6865e7fd1a --- /dev/null +++ b/dify_client_python/build.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +set -e + +rm -rf build dist *.egg-info + +pip install setuptools wheel twine +python setup.py sdist bdist_wheel +twine upload dist/* \ No newline at end of file diff --git a/dify_client_python/build/lib/dify_client/__init__.py b/dify_client_python/build/lib/dify_client/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..77492cb3e6f5518a52223c79d01534cb0aea4d27 --- /dev/null +++ b/dify_client_python/build/lib/dify_client/__init__.py @@ -0,0 +1 @@ +from ._clientx import Client, AsyncClient diff --git a/dify_client_python/build/lib/dify_client/_clientx.py b/dify_client_python/build/lib/dify_client/_clientx.py new file mode 100644 index 0000000000000000000000000000000000000000..bf0ed7ab5c0f27f6d0750fd48ad94e6c2fa350f2 --- /dev/null +++ b/dify_client_python/build/lib/dify_client/_clientx.py @@ -0,0 +1,660 @@ +from typing import Optional, Any, Mapping, Iterator, AsyncIterator, Union, Dict + +try: + from enum import StrEnum +except ImportError: + from strenum import StrEnum +try: + from http import HTTPMethod +except ImportError: + class HTTPMethod(StrEnum): + GET = "GET" + POST = "POST" + PUT = "PUT" + DELETE = "DELETE" + +import httpx +# noinspection PyProtectedMember +import httpx._types as types +from httpx_sse import connect_sse, ServerSentEvent, aconnect_sse +from pydantic import BaseModel + +from dify_client import errors, models + +_httpx_client = httpx.Client() +_async_httpx_client = httpx.AsyncClient() + +IGNORED_STREAM_EVENTS = (models.StreamEvent.PING.value,) + +# feedback +ENDPOINT_FEEDBACKS = "/messages/{message_id}/feedbacks" +# suggest +ENDPOINT_SUGGESTED = "/messages/{message_id}/suggested" +# files upload +ENDPOINT_FILES_UPLOAD = "/files/upload" +# completion +ENDPOINT_COMPLETION_MESSAGES = "/completion-messages" +ENDPOINT_STOP_COMPLETION_MESSAGES = "/completion-messages/{task_id}/stop" +# chat +ENDPOINT_CHAT_MESSAGES = "/chat-messages" +ENDPOINT_STOP_CHAT_MESSAGES = "/chat-messages/{task_id}/stop" +# workflow 
+ENDPOINT_RUN_WORKFLOWS = "/workflows/run" +ENDPOINT_STOP_WORKFLOWS = "/workflows/{task_id}/stop" +# audio <-> text +ENDPOINT_TEXT_TO_AUDIO = "/text-to-audio" +ENDPOINT_AUDIO_TO_TEXT = "/audio-to-text" + + +class Client(BaseModel): + api_key: str + api_base: Optional[str] = "https://api.dify.ai/v1" + + def request(self, endpoint: str, method: str, + content: Optional[types.RequestContent] = None, + data: Optional[types.RequestData] = None, + files: Optional[types.RequestFiles] = None, + json: Optional[Any] = None, + params: Optional[types.QueryParamTypes] = None, + headers: Optional[Mapping[str, str]] = None, + **kwargs: object, + ) -> httpx.Response: + """ + Sends a synchronous HTTP request to the specified endpoint. + + Args: + endpoint: The API endpoint to send the request to. + method: The HTTP method to use (e.g., 'GET', 'POST'). + content: Raw content to include in the request body. + data: Form data to include in the request body. + files: Files to include in the request body. + json: JSON data to include in the request body. + params: Query parameters to include in the request URL. + headers: Additional headers to include in the request. + **kwargs: Extra keyword arguments to pass to the request function. + + Returns: + A `httpx.Response` object containing the HTTP response. + + Raises: + Various DifyAPIError exceptions if the response contains an error. + """ + merged_headers = {} + if headers: + merged_headers.update(headers) + self._prepare_auth_headers(merged_headers) + + response = _httpx_client.request(method, endpoint, content=content, data=data, files=files, json=json, + params=params, headers=merged_headers, **kwargs) + errors.raise_for_status(response) + return response + + def request_stream(self, endpoint: str, method: str, + content: Optional[types.RequestContent] = None, + data: Optional[types.RequestData] = None, + files: Optional[types.RequestFiles] = None, + json: Optional[Any] = None, + params: Optional[types.QueryParamTypes] = None, + headers: Optional[Mapping[str, str]] = None, + **kwargs, + ) -> Iterator[ServerSentEvent]: + """ + Opens a server-sent events (SSE) stream to the specified endpoint. + + Args: + endpoint: The API endpoint to send the request to. + method: The HTTP method to use (e.g., 'GET', 'POST'). + content: Raw content to include in the request body. + data: Form data to include in the request body. + files: Files to include in the request body. + json: JSON data to include in the request body. + params: Query parameters to include in the request URL. + headers: Additional headers to include in the request. + **kwargs: Extra keyword arguments to pass to the request function. + + Returns: + An iterator of `ServerSentEvent` objects representing the stream of events. + + Raises: + Various DifyAPIError exceptions if an error event is received in the stream. 
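+
+        Example (a minimal sketch; the URL and payload are illustrative):
+
+            client = Client(api_key="your-api-key")
+            for sse in client.request_stream(
+                    "https://api.dify.ai/v1/chat-messages",
+                    HTTPMethod.POST,
+                    json={"query": "Hi", "user": "user-123", "response_mode": "streaming"},
+            ):
+                print(sse.event, sse.data)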
+ """ + merged_headers = {} + if headers: + merged_headers.update(headers) + self._prepare_auth_headers(merged_headers) + + with connect_sse(_httpx_client, method, endpoint, headers=merged_headers, + content=content, data=data, files=files, json=json, params=params, **kwargs) as event_source: + if not _check_stream_content_type(event_source.response): + event_source.response.read() + errors.raise_for_status(event_source.response) + for sse in event_source.iter_sse(): + errors.raise_for_status(sse) + if sse.event in IGNORED_STREAM_EVENTS or sse.data in IGNORED_STREAM_EVENTS: + continue + yield sse + + def feedback_messages(self, message_id: str, req: models.FeedbackRequest, **kwargs) -> models.FeedbackResponse: + """ + Submits feedback for a specific message. + + Args: + message_id: The identifier of the message to submit feedback for. + req: A `FeedbackRequest` object containing the feedback details, such as the rating. + **kwargs: Extra keyword arguments to pass to the request function. + + Returns: + A `FeedbackResponse` object containing the result of the feedback submission. + """ + response = self.request( + self._prepare_url(ENDPOINT_FEEDBACKS, message_id=message_id), + HTTPMethod.POST, + json=req.model_dump(), + **kwargs, + ) + return models.FeedbackResponse(**response.json()) + + def suggest_messages(self, message_id: str, req: models.ChatSuggestRequest, **kwargs) -> models.ChatSuggestResponse: + """ + Retrieves suggested messages based on a specific message. + + Args: + message_id: The identifier of the message to get suggestions for. + req: A `ChatSuggestRequest` object containing the request details. + **kwargs: Extra keyword arguments to pass to the request function. + + Returns: + A `ChatSuggestResponse` object containing suggested messages. + """ + response = self.request( + self._prepare_url(ENDPOINT_SUGGESTED, message_id=message_id), + HTTPMethod.GET, + params=req.model_dump(), + **kwargs, + ) + return models.ChatSuggestResponse(**response.json()) + + def upload_files(self, file: types.FileTypes, req: models.UploadFileRequest, + **kwargs) -> models.UploadFileResponse: + """ + Uploads a file to be used in subsequent requests. + + Args: + file: The file to upload. This can be a file-like object, or a tuple of + (`filename`, file-like object, mime_type). + req: An `UploadFileRequest` object containing the upload details, such as the user who is uploading. + **kwargs: Extra keyword arguments to pass to the request function. + + Returns: + An `UploadFileResponse` object containing details about the uploaded file, such as its identifier and URL. + """ + response = self.request( + self._prepare_url(ENDPOINT_FILES_UPLOAD), + HTTPMethod.POST, + data=req.model_dump(), + files=[("file", file)], + **kwargs, + ) + return models.UploadFileResponse(**response.json()) + + def completion_messages(self, req: models.CompletionRequest, **kwargs) \ + -> Union[models.CompletionResponse, Iterator[models.CompletionStreamResponse]]: + """ + Sends a request to generate a completion or a series of completions based on the provided input. + + Returns: + If the response mode is blocking, it returns a `CompletionResponse` object containing the generated message. + If the response mode is streaming, it returns an iterator of `CompletionStreamResponse` objects containing + the stream of generated events. 
+ """ + if req.response_mode == models.ResponseMode.BLOCKING: + return self._completion_messages(req, **kwargs) + if req.response_mode == models.ResponseMode.STREAMING: + return self._completion_messages_stream(req, **kwargs) + raise ValueError(f"Invalid request_mode: {req.response_mode}") + + def _completion_messages(self, req: models.CompletionRequest, **kwargs) -> models.CompletionResponse: + response = self.request( + self._prepare_url(ENDPOINT_COMPLETION_MESSAGES), + HTTPMethod.POST, + json=req.model_dump(), + **kwargs, + ) + return models.CompletionResponse(**response.json()) + + def _completion_messages_stream(self, req: models.CompletionRequest, **kwargs) \ + -> Iterator[models.CompletionStreamResponse]: + event_source = self.request_stream( + self._prepare_url(ENDPOINT_COMPLETION_MESSAGES), + HTTPMethod.POST, + json=req.model_dump(), + **kwargs, + ) + for sse in event_source: + yield models.build_completion_stream_response(sse.json()) + + def stop_completion_messages(self, task_id: str, req: models.StopRequest, **kwargs) -> models.StopResponse: + """ + Sends a request to stop a streaming completion task. + + Returns: + A `StopResponse` object indicating the success of the operation. + """ + return self._stop_stream(self._prepare_url(ENDPOINT_STOP_COMPLETION_MESSAGES, task_id=task_id), req, **kwargs) + + def chat_messages(self, req: models.ChatRequest, **kwargs) \ + -> Union[models.ChatResponse, Iterator[models.ChatStreamResponse]]: + """ + Sends a request to generate a chat message or a series of chat messages based on the provided input. + + Returns: + If the response mode is blocking, it returns a `ChatResponse` object containing the generated chat message. + If the response mode is streaming, it returns an iterator of `ChatStreamResponse` objects containing the + stream of chat events. + """ + if req.response_mode == models.ResponseMode.BLOCKING: + return self._chat_messages(req, **kwargs) + if req.response_mode == models.ResponseMode.STREAMING: + return self._chat_messages_stream(req, **kwargs) + raise ValueError(f"Invalid request_mode: {req.response_mode}") + + def _chat_messages(self, req: models.ChatRequest, **kwargs) -> models.ChatResponse: + response = self.request( + self._prepare_url(ENDPOINT_CHAT_MESSAGES), + HTTPMethod.POST, + json=req.model_dump(), + **kwargs, + ) + return models.ChatResponse(**response.json()) + + def _chat_messages_stream(self, req: models.ChatRequest, **kwargs) -> Iterator[models.ChatStreamResponse]: + event_source = self.request_stream( + self._prepare_url(ENDPOINT_CHAT_MESSAGES), + HTTPMethod.POST, + json=req.model_dump(), + **kwargs, + ) + for sse in event_source: + yield models.build_chat_stream_response(sse.json()) + + def stop_chat_messages(self, task_id: str, req: models.StopRequest, **kwargs) -> models.StopResponse: + """ + Sends a request to stop a streaming chat task. + + Returns: + A `StopResponse` object indicating the success of the operation. + """ + return self._stop_stream(self._prepare_url(ENDPOINT_STOP_CHAT_MESSAGES, task_id=task_id), req, **kwargs) + + def run_workflows(self, req: models.WorkflowsRunRequest, **kwargs) \ + -> Union[models.WorkflowsRunResponse, Iterator[models.WorkflowsRunStreamResponse]]: + """ + Initiates the execution of a workflow, which can consist of multiple steps and actions. + + Returns: + If the response mode is blocking, it returns a `WorkflowsRunResponse` object containing the results of the + completed workflow. 
+ If the response mode is streaming, it returns an iterator of `WorkflowsRunStreamResponse` objects + containing the stream of workflow events. + """ + if req.response_mode == models.ResponseMode.BLOCKING: + return self._run_workflows(req, **kwargs) + if req.response_mode == models.ResponseMode.STREAMING: + return self._run_workflows_stream(req, **kwargs) + raise ValueError(f"Invalid request_mode: {req.response_mode}") + + def _run_workflows(self, req: models.WorkflowsRunRequest, **kwargs) -> models.WorkflowsRunResponse: + response = self.request( + self._prepare_url(ENDPOINT_RUN_WORKFLOWS), + HTTPMethod.POST, + json=req.model_dump(), + **kwargs, + ) + return models.WorkflowsRunResponse(**response.json()) + + def _run_workflows_stream(self, req: models.WorkflowsRunRequest, **kwargs) \ + -> Iterator[models.WorkflowsRunStreamResponse]: + event_source = self.request_stream( + self._prepare_url(ENDPOINT_RUN_WORKFLOWS), + HTTPMethod.POST, + json=req.model_dump(), + **kwargs, + ) + for sse in event_source: + yield models.build_workflows_stream_response(sse.json()) + + def stop_workflows(self, task_id: str, req: models.StopRequest, **kwargs) -> models.StopResponse: + """ + Sends a request to stop a streaming workflow task. + + Returns: + A `StopResponse` object indicating the success of the operation. + """ + return self._stop_stream(self._prepare_url(ENDPOINT_STOP_WORKFLOWS, task_id=task_id), req, **kwargs) + + def _stop_stream(self, endpoint: str, req: models.StopRequest, **kwargs) -> models.StopResponse: + response = self.request( + endpoint, + HTTPMethod.POST, + json=req.model_dump(), + **kwargs, + ) + return models.StopResponse(**response.json()) + + def _prepare_url(self, endpoint: str, **kwargs) -> str: + return self.api_base + endpoint.format(**kwargs) + + def _prepare_auth_headers(self, headers: Dict[str, str]): + if "authorization" not in (key.lower() for key in headers.keys()): + headers["Authorization"] = f"Bearer {self.api_key}" + + +class AsyncClient(BaseModel): + api_key: str + api_base: Optional[str] = "https://api.dify.ai/v1" + + async def arequest(self, endpoint: str, method: str, + content: Optional[types.RequestContent] = None, + data: Optional[types.RequestData] = None, + files: Optional[types.RequestFiles] = None, + json: Optional[Any] = None, + params: Optional[types.QueryParamTypes] = None, + headers: Optional[Mapping[str, str]] = None, + **kwargs, + ) -> httpx.Response: + """ + Asynchronously sends a request to the specified Dify API endpoint. + + Args: + endpoint: The endpoint URL to which the request is sent. + method: The HTTP method to be used for the request (e.g., 'GET', 'POST'). + content: Raw content to include in the request body, if any. + data: Form data to be sent in the request body. + files: Files to be uploaded with the request. + json: JSON data to be sent in the request body. + params: Query parameters to be included in the request URL. + headers: Additional headers to be sent with the request. + **kwargs: Extra keyword arguments to be passed to the underlying HTTPX request function. + + Returns: + A httpx.Response object containing the server's response to the HTTP request. + + Raises: + Various DifyAPIError exceptions if the response contains an error. 
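+
+        Example (a minimal sketch; the endpoint and payload are illustrative):
+
+            response = await async_client.arequest(
+                "https://api.dify.ai/v1/chat-messages",
+                HTTPMethod.POST,
+                json={"query": "Hi", "user": "user-123", "response_mode": "blocking"},
+            )
+            print(response.json())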
+ """ + merged_headers = {} + if headers: + merged_headers.update(headers) + self._prepare_auth_headers(merged_headers) + + response = await _async_httpx_client.request(method, endpoint, content=content, data=data, files=files, + json=json, params=params, headers=merged_headers, **kwargs) + errors.raise_for_status(response) + return response + + async def arequest_stream(self, endpoint: str, method: str, + content: Optional[types.RequestContent] = None, + data: Optional[types.RequestData] = None, + files: Optional[types.RequestFiles] = None, + json: Optional[Any] = None, + params: Optional[types.QueryParamTypes] = None, + headers: Optional[Mapping[str, str]] = None, + **kwargs, + ) -> AsyncIterator[ServerSentEvent]: + """ + Asynchronously establishes a streaming connection to the specified Dify API endpoint. + + Args: + endpoint: The endpoint URL to which the request is sent. + method: The HTTP method to be used for the request (e.g., 'GET', 'POST'). + content: Raw content to include in the request body, if any. + data: Form data to be sent in the request body. + files: Files to be uploaded with the request. + json: JSON data to be sent in the request body. + params: Query parameters to be included in the request URL. + headers: Additional headers to be sent with the request. + **kwargs: Extra keyword arguments to be passed to the underlying HTTPX request function. + + Yields: + ServerSentEvent objects representing the events received from the server. + + Raises: + Various DifyAPIError exceptions if an error event is received in the stream. + """ + merged_headers = {} + if headers: + merged_headers.update(headers) + self._prepare_auth_headers(merged_headers) + + async with aconnect_sse(_async_httpx_client, method, endpoint, headers=merged_headers, + content=content, data=data, files=files, json=json, params=params, + **kwargs) as event_source: + if not _check_stream_content_type(event_source.response): + await event_source.response.aread() + errors.raise_for_status(event_source.response) + async for sse in event_source.aiter_sse(): + errors.raise_for_status(sse) + if sse.event in IGNORED_STREAM_EVENTS or sse.data in IGNORED_STREAM_EVENTS: + continue + yield sse + + async def afeedback_messages(self, message_id: str, req: models.FeedbackRequest, **kwargs) \ + -> models.FeedbackResponse: + """ + Submits feedback for a specific message. + + Args: + message_id: The identifier of the message to submit feedback for. + req: A `FeedbackRequest` object containing the feedback details, such as the rating. + **kwargs: Extra keyword arguments to pass to the request function. + + Returns: + A `FeedbackResponse` object containing the result of the feedback submission. + """ + response = await self.arequest( + self._prepare_url(ENDPOINT_FEEDBACKS, message_id=message_id), + HTTPMethod.POST, + json=req.model_dump(), + **kwargs, + ) + return models.FeedbackResponse(**response.json()) + + async def asuggest_messages(self, message_id: str, req: models.ChatSuggestRequest, **kwargs) \ + -> models.ChatSuggestResponse: + """ + Retrieves suggested messages based on a specific message. + + Args: + message_id: The identifier of the message to get suggestions for. + req: A `ChatSuggestRequest` object containing the request details. + **kwargs: Extra keyword arguments to pass to the request function. + + Returns: + A `ChatSuggestResponse` object containing suggested messages. 
+ """ + response = await self.arequest( + self._prepare_url(ENDPOINT_SUGGESTED, message_id=message_id), + HTTPMethod.GET, + params=req.model_dump(), + **kwargs, + ) + return models.ChatSuggestResponse(**response.json()) + + async def aupload_files(self, file: types.FileTypes, req: models.UploadFileRequest, **kwargs) \ + -> models.UploadFileResponse: + """ + Uploads a file to be used in subsequent requests. + + Args: + file: The file to upload. This can be a file-like object, or a tuple of + (`filename`, file-like object, mime_type). + req: An `UploadFileRequest` object containing the upload details, such as the user who is uploading. + **kwargs: Extra keyword arguments to pass to the request function. + + Returns: + An `UploadFileResponse` object containing details about the uploaded file, such as its identifier and URL. + """ + response = await self.arequest( + self._prepare_url(ENDPOINT_FILES_UPLOAD), + HTTPMethod.POST, + data=req.model_dump(), + files=[("file", file)], + **kwargs, + ) + return models.UploadFileResponse(**response.json()) + + async def acompletion_messages(self, req: models.CompletionRequest, **kwargs) \ + -> Union[models.CompletionResponse, AsyncIterator[models.CompletionStreamResponse]]: + """ + Sends a request to generate a completion or a series of completions based on the provided input. + + Returns: + If the response mode is blocking, it returns a `CompletionResponse` object containing the generated message. + If the response mode is streaming, it returns an iterator of `CompletionStreamResponse` objects containing + the stream of generated events. + """ + if req.response_mode == models.ResponseMode.BLOCKING: + return await self._acompletion_messages(req, **kwargs) + if req.response_mode == models.ResponseMode.STREAMING: + return self._acompletion_messages_stream(req, **kwargs) + raise ValueError(f"Invalid request_mode: {req.response_mode}") + + async def _acompletion_messages(self, req: models.CompletionRequest, **kwargs) -> models.CompletionResponse: + response = await self.arequest( + self._prepare_url(ENDPOINT_COMPLETION_MESSAGES), + HTTPMethod.POST, + json=req.model_dump(), + **kwargs, + ) + return models.CompletionResponse(**response.json()) + + async def _acompletion_messages_stream(self, req: models.CompletionRequest, **kwargs) \ + -> AsyncIterator[models.CompletionStreamResponse]: + async for sse in self.arequest_stream( + self._prepare_url(ENDPOINT_COMPLETION_MESSAGES), + HTTPMethod.POST, + json=req.model_dump(), + **kwargs): + yield models.build_completion_stream_response(sse.json()) + + async def astop_completion_messages(self, task_id: str, req: models.StopRequest, **kwargs) -> models.StopResponse: + """ + Sends a request to stop a streaming completion task. + + Returns: + A `StopResponse` object indicating the success of the operation. + """ + return await self._astop_stream( + self._prepare_url(ENDPOINT_STOP_COMPLETION_MESSAGES, task_id=task_id), req, **kwargs) + + async def achat_messages(self, req: models.ChatRequest, **kwargs) \ + -> Union[models.ChatResponse, AsyncIterator[models.ChatStreamResponse]]: + """ + Sends a request to generate a chat message or a series of chat messages based on the provided input. + + Returns: + If the response mode is blocking, it returns a `ChatResponse` object containing the generated chat message. + If the response mode is streaming, it returns an iterator of `ChatStreamResponse` objects containing the + stream of chat events. 
+ """ + if req.response_mode == models.ResponseMode.BLOCKING: + return await self._achat_messages(req, **kwargs) + if req.response_mode == models.ResponseMode.STREAMING: + return self._achat_messages_stream(req, **kwargs) + raise ValueError(f"Invalid request_mode: {req.response_mode}") + + async def _achat_messages(self, req: models.ChatRequest, **kwargs) -> models.ChatResponse: + response = await self.arequest( + self._prepare_url(ENDPOINT_CHAT_MESSAGES), + HTTPMethod.POST, + json=req.model_dump(), + **kwargs, + ) + return models.ChatResponse(**response.json()) + + async def _achat_messages_stream(self, req: models.ChatRequest, **kwargs) \ + -> AsyncIterator[models.ChatStreamResponse]: + async for sse in self.arequest_stream( + self._prepare_url(ENDPOINT_CHAT_MESSAGES), + HTTPMethod.POST, + json=req.model_dump(), + **kwargs): + yield models.build_chat_stream_response(sse.json()) + + async def astop_chat_messages(self, task_id: str, req: models.StopRequest, **kwargs) -> models.StopResponse: + """ + Sends a request to stop a streaming chat task. + + Returns: + A `StopResponse` object indicating the success of the operation. + """ + return await self._astop_stream(self._prepare_url(ENDPOINT_STOP_CHAT_MESSAGES, task_id=task_id), req, **kwargs) + + async def arun_workflows(self, req: models.WorkflowsRunRequest, **kwargs) \ + -> Union[models.WorkflowsRunResponse, AsyncIterator[models.WorkflowsStreamResponse]]: + """ + Initiates the execution of a workflow, which can consist of multiple steps and actions. + + Returns: + If the response mode is blocking, it returns a `WorkflowsRunResponse` object containing the results of the + completed workflow. + If the response mode is streaming, it returns an iterator of `WorkflowsRunStreamResponse` objects + containing the stream of workflow events. + """ + if req.response_mode == models.ResponseMode.BLOCKING: + return await self._arun_workflows(req, **kwargs) + if req.response_mode == models.ResponseMode.STREAMING: + return self._arun_workflows_stream(req, **kwargs) + raise ValueError(f"Invalid request_mode: {req.response_mode}") + + async def _arun_workflows(self, req: models.WorkflowsRunRequest, **kwargs) -> models.WorkflowsRunResponse: + response = await self.arequest( + self._prepare_url(ENDPOINT_RUN_WORKFLOWS), + HTTPMethod.POST, + json=req.model_dump(), + **kwargs, + ) + return models.WorkflowsRunResponse(**response.json()) + + async def _arun_workflows_stream(self, req: models.WorkflowsRunRequest, **kwargs) \ + -> AsyncIterator[models.WorkflowsRunStreamResponse]: + async for sse in self.arequest_stream( + self._prepare_url(ENDPOINT_RUN_WORKFLOWS), + HTTPMethod.POST, + json=req.model_dump(), + **kwargs): + yield models.build_workflows_stream_response(sse.json()) + + async def astop_workflows(self, task_id: str, req: models.StopRequest, **kwargs) -> models.StopResponse: + """ + Sends a request to stop a streaming workflow task. + + Returns: + A `StopResponse` object indicating the success of the operation. 
+ """ + return await self._astop_stream(self._prepare_url(ENDPOINT_STOP_WORKFLOWS, task_id=task_id), req, **kwargs) + + async def _astop_stream(self, endpoint: str, req: models.StopRequest, **kwargs) -> models.StopResponse: + response = await self.arequest( + endpoint, + HTTPMethod.POST, + json=req.model_dump(), + **kwargs, + ) + return models.StopResponse(**response.json()) + + def _prepare_url(self, endpoint: str, **kwargs) -> str: + return self.api_base + endpoint.format(**kwargs) + + def _prepare_auth_headers(self, headers: Dict[str, str]): + if "authorization" not in (key.lower() for key in headers.keys()): + headers["Authorization"] = f"Bearer {self.api_key}" + + +def _get_content_type(headers: httpx.Headers) -> str: + return headers.get("content-type", "").partition(";")[0] + + +def _check_stream_content_type(response: httpx.Response) -> bool: + content_type = _get_content_type(response.headers) + return response.is_success and "text/event-stream" in content_type diff --git a/dify_client_python/build/lib/dify_client/errors.py b/dify_client_python/build/lib/dify_client/errors.py new file mode 100644 index 0000000000000000000000000000000000000000..8d268fd164d6727ccd5c38cb51515324f59fc028 --- /dev/null +++ b/dify_client_python/build/lib/dify_client/errors.py @@ -0,0 +1,132 @@ +from http import HTTPStatus +from typing import Union + +import httpx +import httpx_sse + +from dify_client import models + + +class DifyAPIError(Exception): + def __init__(self, status: int, code: str, message: str): + super().__init__(f"status_code={status}, code={code}, {message}") + self.status = status + self.code = code + self.message = message + + +class DifyInvalidParam(DifyAPIError): + pass + + +class DifyNotChatApp(DifyAPIError): + pass + + +class DifyResourceNotFound(DifyAPIError): + pass + + +class DifyAppUnavailable(DifyAPIError): + pass + + +class DifyProviderNotInitialize(DifyAPIError): + pass + + +class DifyProviderQuotaExceeded(DifyAPIError): + pass + + +class DifyModelCurrentlyNotSupport(DifyAPIError): + pass + + +class DifyCompletionRequestError(DifyAPIError): + pass + + +class DifyInternalServerError(DifyAPIError): + pass + + +class DifyNoFileUploaded(DifyAPIError): + pass + + +class DifyTooManyFiles(DifyAPIError): + pass + + +class DifyUnsupportedPreview(DifyAPIError): + pass + + +class DifyUnsupportedEstimate(DifyAPIError): + pass + + +class DifyFileTooLarge(DifyAPIError): + pass + + +class DifyUnsupportedFileType(DifyAPIError): + pass + + +class DifyS3ConnectionFailed(DifyAPIError): + pass + + +class DifyS3PermissionDenied(DifyAPIError): + pass + + +class DifyS3FileTooLarge(DifyAPIError): + pass + + +SPEC_CODE_ERRORS = { + # completion & chat & workflow + "invalid_param": DifyInvalidParam, + "not_chat_app": DifyNotChatApp, + "app_unavailable": DifyAppUnavailable, + "provider_not_initialize": DifyProviderNotInitialize, + "provider_quota_exceeded": DifyProviderQuotaExceeded, + "model_currently_not_support": DifyModelCurrentlyNotSupport, + "completion_request_error": DifyCompletionRequestError, + # files upload + "no_file_uploaded": DifyNoFileUploaded, + "too_many_files": DifyTooManyFiles, + "unsupported_preview": DifyUnsupportedPreview, + "unsupported_estimate": DifyUnsupportedEstimate, + "file_too_large": DifyFileTooLarge, + "unsupported_file_type": DifyUnsupportedFileType, + "s3_connection_failed": DifyS3ConnectionFailed, + "s3_permission_denied": DifyS3PermissionDenied, + "s3_file_too_large": DifyS3FileTooLarge, +} + + +def raise_for_status(response: Union[httpx.Response, 
httpx_sse.ServerSentEvent]): + if isinstance(response, httpx.Response): + if response.is_success: + return + json = response.json() + if "status" not in json: + json["status"] = response.status_code + details = models.ErrorResponse(**json) + elif isinstance(response, httpx_sse.ServerSentEvent): + if response.event != models.StreamEvent.ERROR.value: + return + details = models.ErrorStreamResponse(**response.json()) + else: + raise ValueError(f"Invalid dify response type: {type(response)}") + + if details.status == HTTPStatus.NOT_FOUND: + raise DifyResourceNotFound(details.status, details.code, details.message) + elif details.status == HTTPStatus.INTERNAL_SERVER_ERROR: + raise DifyInternalServerError(details.status, details.code, details.message) + else: + raise SPEC_CODE_ERRORS.get(details.code, DifyAPIError)(details.status, details.code, details.message) diff --git a/dify_client_python/build/lib/dify_client/models/__init__.py b/dify_client_python/build/lib/dify_client/models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9a9aa5fb121b51222c290f4af7fcb3ea3fe070e7 --- /dev/null +++ b/dify_client_python/build/lib/dify_client/models/__init__.py @@ -0,0 +1,7 @@ +from .chat import * +from .completion import * +from .feedback import * +from .file import * +from .workflow import * +from .stream import * +from .base import StopRequest, StopResponse diff --git a/dify_client_python/build/lib/dify_client/models/base.py b/dify_client_python/build/lib/dify_client/models/base.py new file mode 100644 index 0000000000000000000000000000000000000000..1b1fd9c5abd0d4a4349cd368b6c88210e80a3431 --- /dev/null +++ b/dify_client_python/build/lib/dify_client/models/base.py @@ -0,0 +1,93 @@ +try: + from enum import StrEnum +except ImportError: + from strenum import StrEnum +from http import HTTPStatus +from typing import Optional, List + +from pydantic import BaseModel, ConfigDict + + +class Mode(StrEnum): + CHAT = "chat" + COMPLETION = "completion" + + +class ResponseMode(StrEnum): + STREAMING = 'streaming' + BLOCKING = 'blocking' + + +class FileType(StrEnum): + IMAGE = "image" + + +class TransferMethod(StrEnum): + REMOTE_URL = "remote_url" + LOCAL_FILE = "local_file" + + +# Allows the entry of various variable values defined by the App. +# The inputs parameter contains multiple key/value pairs, with each key corresponding to a specific variable and +# each value being the specific value for that variable. +# The text generation application requires at least one key/value pair to be inputted. +class CompletionInputs(BaseModel): + model_config = ConfigDict(extra='allow') + # Required The input text, the content to be processed. 
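+    # For example (illustrative values; extra keys are accepted because of extra='allow'):
+    #   CompletionInputs(query="Translate 'hello' to French", target_lang="fr")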
+ query: str + + +class File(BaseModel): + type: FileType + transfer_method: TransferMethod + url: Optional[str] + # Uploaded file ID, which must be obtained by uploading through the File Upload API in advance + # (when the transfer method is local_file) + upload_file_id: Optional[str] + + +class Usage(BaseModel): + prompt_tokens: int + completion_tokens: int + total_tokens: int + + prompt_unit_price: str + prompt_price_unit: str + prompt_price: str + completion_unit_price: str + completion_price_unit: str + completion_price: str + total_price: str + currency: str + + latency: float + + +class RetrieverResource(BaseModel): + position: int + dataset_id: str + dataset_name: str + document_id: str + document_name: str + segment_id: str + score: float + content: str + + +class Metadata(BaseModel): + usage: Usage + retriever_resources: List[RetrieverResource] = [] + + +class StopRequest(BaseModel): + user: str + + +class StopResponse(BaseModel): + result: str # success + + +class ErrorResponse(BaseModel): + status: int = HTTPStatus.INTERNAL_SERVER_ERROR # HTTP status code + code: str = "" + message: str = "" diff --git a/dify_client_python/build/lib/dify_client/models/chat.py b/dify_client_python/build/lib/dify_client/models/chat.py new file mode 100644 index 0000000000000000000000000000000000000000..1dff07a4fa21361a55913b4fd8d34b1d5af548b6 --- /dev/null +++ b/dify_client_python/build/lib/dify_client/models/chat.py @@ -0,0 +1,29 @@ +from typing import Dict, List, Optional, Any + +from pydantic import BaseModel, Field + +from dify_client.models.base import ResponseMode, File +from dify_client.models.completion import CompletionResponse + + +class ChatRequest(BaseModel): + query: str + inputs: Dict[str, Any] = Field(default_factory=dict) + response_mode: ResponseMode + user: str + conversation_id: Optional[str] = "" + files: List[File] = [] + auto_generate_name: bool = True + + +class ChatResponse(CompletionResponse): + pass + + +class ChatSuggestRequest(BaseModel): + user: str + + +class ChatSuggestResponse(BaseModel): + result: str + data: List[str] = [] diff --git a/dify_client_python/build/lib/dify_client/models/completion.py b/dify_client_python/build/lib/dify_client/models/completion.py new file mode 100644 index 0000000000000000000000000000000000000000..e4b1832e167cef145e49748c4844713b7aa7390a --- /dev/null +++ b/dify_client_python/build/lib/dify_client/models/completion.py @@ -0,0 +1,22 @@ +from typing import Optional, List + +from pydantic import BaseModel + +from dify_client.models.base import CompletionInputs, ResponseMode, File, Metadata, Mode + + +class CompletionRequest(BaseModel): + inputs: CompletionInputs + response_mode: ResponseMode + user: str + conversation_id: Optional[str] = "" + files: List[File] = [] + + +class CompletionResponse(BaseModel): + message_id: str + conversation_id: Optional[str] = "" + mode: Mode + answer: str + metadata: Metadata + created_at: int # unix timestamp seconds diff --git a/dify_client_python/build/lib/dify_client/models/feedback.py b/dify_client_python/build/lib/dify_client/models/feedback.py new file mode 100644 index 0000000000000000000000000000000000000000..aae0242dc054f728db4e6f0a55b0e6d80b2ae0dd --- /dev/null +++ b/dify_client_python/build/lib/dify_client/models/feedback.py @@ -0,0 +1,21 @@ +try: + from enum import StrEnum +except ImportError: + from strenum import StrEnum +from typing import Optional + +from pydantic import BaseModel + + +class Rating(StrEnum): + LIKE = "like" + DISLIKE = "dislike" + + +class FeedbackRequest(BaseModel): + 
rating: Optional[Rating] = None + user: str + + +class FeedbackResponse(BaseModel): + result: str # success diff --git a/dify_client_python/build/lib/dify_client/models/file.py b/dify_client_python/build/lib/dify_client/models/file.py new file mode 100644 index 0000000000000000000000000000000000000000..9788f7ab867ba886aec59e414875a222512d3bf2 --- /dev/null +++ b/dify_client_python/build/lib/dify_client/models/file.py @@ -0,0 +1,15 @@ +from pydantic import BaseModel + + +class UploadFileRequest(BaseModel): + user: str + + +class UploadFileResponse(BaseModel): + id: str + name: str + size: int + extension: str + mime_type: str + created_by: str # created by user + created_at: int # unix timestamp seconds diff --git a/dify_client_python/build/lib/dify_client/models/stream.py b/dify_client_python/build/lib/dify_client/models/stream.py new file mode 100644 index 0000000000000000000000000000000000000000..137bd63a1207e48e0a7ae33b39d0c9680ce8dbc8 --- /dev/null +++ b/dify_client_python/build/lib/dify_client/models/stream.py @@ -0,0 +1,186 @@ +try: + from enum import StrEnum +except ImportError: + from strenum import StrEnum +from typing import Union, Optional, List + +from pydantic import BaseModel, ConfigDict, field_validator + +from dify_client import utils +from dify_client.models.base import Metadata, ErrorResponse +from dify_client.models.workflow import WorkflowStartedData, WorkflowFinishedData, NodeStartedData, NodeFinishedData + +STREAM_EVENT_KEY = "event" + + +class StreamEvent(StrEnum): + MESSAGE = "message" + AGENT_MESSAGE = "agent_message" + AGENT_THOUGHT = "agent_thought" + MESSAGE_FILE = "message_file" # need to show file + WORKFLOW_STARTED = "workflow_started" + NODE_STARTED = "node_started" + NODE_FINISHED = "node_finished" + WORKFLOW_FINISHED = "workflow_finished" + MESSAGE_END = "message_end" + MESSAGE_REPLACE = "message_replace" + ERROR = "error" + PING = "ping" + + @classmethod + def new(cls, event: Union["StreamEvent", str]) -> "StreamEvent": + if isinstance(event, cls): + return event + return utils.str_to_enum(cls, event) + + +class StreamResponse(BaseModel): + model_config = ConfigDict(extra='allow') + + event: StreamEvent + task_id: Optional[str] = "" + + @field_validator("event", mode="before") + def transform_stream_event(cls, event: Union[StreamEvent, str]) -> StreamEvent: + return StreamEvent.new(event) + + +class PingResponse(StreamResponse): + pass + + +class ErrorStreamResponse(StreamResponse, ErrorResponse): + message_id: Optional[str] = "" + + +class MessageStreamResponse(StreamResponse): + message_id: str + conversation_id: Optional[str] = "" + answer: str + created_at: int # unix timestamp seconds + + +class MessageEndStreamResponse(StreamResponse): + message_id: str + conversation_id: Optional[str] = "" + created_at: int # unix timestamp seconds + metadata: Optional[Metadata] + + +class MessageReplaceStreamResponse(MessageStreamResponse): + pass + + +class AgentMessageStreamResponse(MessageStreamResponse): + pass + + +class AgentThoughtStreamResponse(StreamResponse): + id: str # agent thought id + message_id: str + conversation_id: str + position: int # thought position, start from 1 + thought: str + observation: str + tool: str + tool_input: str + message_files: List[str] = [] + created_at: int # unix timestamp seconds + + +class MessageFileStreamResponse(StreamResponse): + id: str # file id + conversation_id: str + type: str # only image + belongs_to: str # assistant + url: str + + +class WorkflowsStreamResponse(StreamResponse): + workflow_run_id: str + data: 
Optional[Union[ + WorkflowStartedData, + WorkflowFinishedData, + NodeStartedData, + NodeFinishedData] + ] + + +class ChatWorkflowsStreamResponse(WorkflowsStreamResponse): + message_id: str + conversation_id: str + created_at: int + + +_COMPLETION_EVENT_TO_STREAM_RESP_MAPPING = { + StreamEvent.PING: PingResponse, + StreamEvent.MESSAGE: MessageStreamResponse, + StreamEvent.MESSAGE_END: MessageEndStreamResponse, + StreamEvent.MESSAGE_REPLACE: MessageReplaceStreamResponse, +} + +CompletionStreamResponse = Union[ + PingResponse, + MessageStreamResponse, + MessageEndStreamResponse, + MessageReplaceStreamResponse, +] + + +def build_completion_stream_response(data: dict) -> CompletionStreamResponse: + event = StreamEvent.new(data.get(STREAM_EVENT_KEY)) + return _COMPLETION_EVENT_TO_STREAM_RESP_MAPPING.get(event, StreamResponse)(**data) + + +_CHAT_EVENT_TO_STREAM_RESP_MAPPING = { + StreamEvent.PING: PingResponse, + # chat + StreamEvent.MESSAGE: MessageStreamResponse, + StreamEvent.MESSAGE_END: MessageEndStreamResponse, + StreamEvent.MESSAGE_REPLACE: MessageReplaceStreamResponse, + StreamEvent.MESSAGE_FILE: MessageFileStreamResponse, + # agent + StreamEvent.AGENT_MESSAGE: AgentMessageStreamResponse, + StreamEvent.AGENT_THOUGHT: AgentThoughtStreamResponse, + # workflow + StreamEvent.WORKFLOW_STARTED: WorkflowsStreamResponse, + StreamEvent.NODE_STARTED: WorkflowsStreamResponse, + StreamEvent.NODE_FINISHED: WorkflowsStreamResponse, + StreamEvent.WORKFLOW_FINISHED: WorkflowsStreamResponse, +} + +ChatStreamResponse = Union[ + PingResponse, + MessageStreamResponse, + MessageEndStreamResponse, + MessageReplaceStreamResponse, + MessageFileStreamResponse, + AgentMessageStreamResponse, + AgentThoughtStreamResponse, + WorkflowsStreamResponse, +] + + +def build_chat_stream_response(data: dict) -> ChatStreamResponse: + event = StreamEvent.new(data.get(STREAM_EVENT_KEY)) + return _CHAT_EVENT_TO_STREAM_RESP_MAPPING.get(event, StreamResponse)(**data) + + +_WORKFLOW_EVENT_TO_STREAM_RESP_MAPPING = { + StreamEvent.PING: PingResponse, + # workflow + StreamEvent.WORKFLOW_STARTED: WorkflowsStreamResponse, + StreamEvent.NODE_STARTED: WorkflowsStreamResponse, + StreamEvent.NODE_FINISHED: WorkflowsStreamResponse, + StreamEvent.WORKFLOW_FINISHED: WorkflowsStreamResponse, +} + +WorkflowsRunStreamResponse = Union[ + PingResponse, + WorkflowsStreamResponse, +] + + +def build_workflows_stream_response(data: dict) -> WorkflowsRunStreamResponse: + event = StreamEvent.new(data.get(STREAM_EVENT_KEY)) + return _WORKFLOW_EVENT_TO_STREAM_RESP_MAPPING.get(event, StreamResponse)(**data) diff --git a/dify_client_python/build/lib/dify_client/models/workflow.py b/dify_client_python/build/lib/dify_client/models/workflow.py new file mode 100644 index 0000000000000000000000000000000000000000..93a1e01c0d6974db8aa2f2b0e6052b02bf50cf05 --- /dev/null +++ b/dify_client_python/build/lib/dify_client/models/workflow.py @@ -0,0 +1,91 @@ +try: + from enum import StrEnum +except ImportError: + from strenum import StrEnum +from typing import Dict, List, Optional + +from pydantic import BaseModel + +from dify_client.models.base import ResponseMode, File + + +class WorkflowStatus(StrEnum): + RUNNING = "running" + SUCCEEDED = "succeeded" + FAILED = "failed" + STOPPED = "stopped" + + +class ExecutionMetadata(BaseModel): + total_tokens: Optional[int] + total_price: Optional[str] + currency: Optional[str] + + +class WorkflowStartedData(BaseModel): + id: str # workflow run id + workflow_id: str # workflow id + sequence_number: int + inputs: Optional[dict] = None 
+ created_at: int # unix timestamp seconds + + +class NodeStartedData(BaseModel): + id: str # workflow run id + node_id: str + node_type: str + title: str + index: int + predecessor_node_id: Optional[str] = None + inputs: Optional[dict] = None + created_at: int + extras: dict = {} + + +class NodeFinishedData(BaseModel): + id: str # workflow run id + node_id: str + node_type: str + title: str + index: int + predecessor_node_id: Optional[str] = None + inputs: Optional[dict] = None + process_data: Optional[dict] = None + outputs: Optional[dict] = {} + status: WorkflowStatus + error: Optional[str] = None + elapsed_time: Optional[float] # seconds + execution_metadata: Optional[ExecutionMetadata] = None + created_at: int + finished_at: int + files: List = [] + + +class WorkflowFinishedData(BaseModel): + id: str # workflow run id + workflow_id: str # workflow id + sequence_number: int + status: WorkflowStatus + outputs: Optional[dict] + error: Optional[str] + elapsed_time: Optional[float] + total_tokens: Optional[int] + total_steps: Optional[int] = 0 + created_at: int + finished_at: int + created_by: dict = {} + files: List = [] + + +class WorkflowsRunRequest(BaseModel): + inputs: Dict = {} + response_mode: ResponseMode + user: str + conversation_id: Optional[str] = "" + files: List[File] = [] + + +class WorkflowsRunResponse(BaseModel): + log_id: str + task_id: str + data: WorkflowFinishedData diff --git a/dify_client_python/build/lib/dify_client/utils/__init__.py b/dify_client_python/build/lib/dify_client/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c897d034346359c806e5c85c77fb2a385d34ec87 --- /dev/null +++ b/dify_client_python/build/lib/dify_client/utils/__init__.py @@ -0,0 +1 @@ +from ._common import * diff --git a/dify_client_python/build/lib/dify_client/utils/_common.py b/dify_client_python/build/lib/dify_client/utils/_common.py new file mode 100644 index 0000000000000000000000000000000000000000..ee76f52817da8008fc3a8109514d81ff17f68da5 --- /dev/null +++ b/dify_client_python/build/lib/dify_client/utils/_common.py @@ -0,0 +1,7 @@ +def str_to_enum(str_enum_class, str_value: str, ignore_not_found: bool = False, enum_default=None): + for key, member in str_enum_class.__members__.items(): + if str_value == member.value: + return member + if ignore_not_found: + return enum_default + raise ValueError(f"Invalid enum value: {str_value}") diff --git a/dify_client_python/dify_client/.DS_Store b/dify_client_python/dify_client/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..a89bc8560a4b4c4faf32aa9a49d94970b2dd8882 Binary files /dev/null and b/dify_client_python/dify_client/.DS_Store differ diff --git a/dify_client_python/dify_client/__init__.py b/dify_client_python/dify_client/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..77492cb3e6f5518a52223c79d01534cb0aea4d27 --- /dev/null +++ b/dify_client_python/dify_client/__init__.py @@ -0,0 +1 @@ +from ._clientx import Client, AsyncClient diff --git a/dify_client_python/dify_client/__pycache__/__init__.cpython-310.pyc b/dify_client_python/dify_client/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..32cc15fe2de6e13b9f3ba27ad9381ffe46711877 Binary files /dev/null and b/dify_client_python/dify_client/__pycache__/__init__.cpython-310.pyc differ diff --git a/dify_client_python/dify_client/__pycache__/__init__.cpython-312.pyc b/dify_client_python/dify_client/__pycache__/__init__.cpython-312.pyc new file 
mode 100644 index 0000000000000000000000000000000000000000..c441c7d29c916df921d35876d2320db6ee6be56a Binary files /dev/null and b/dify_client_python/dify_client/__pycache__/__init__.cpython-312.pyc differ diff --git a/dify_client_python/dify_client/__pycache__/_clientx.cpython-310.pyc b/dify_client_python/dify_client/__pycache__/_clientx.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d5919a8bb70cb63bcd5fa5973d9c44ee0b952f6e Binary files /dev/null and b/dify_client_python/dify_client/__pycache__/_clientx.cpython-310.pyc differ diff --git a/dify_client_python/dify_client/__pycache__/_clientx.cpython-312.pyc b/dify_client_python/dify_client/__pycache__/_clientx.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..89d4adf8ee55410e370df9625ba92bbbd2a0d809 Binary files /dev/null and b/dify_client_python/dify_client/__pycache__/_clientx.cpython-312.pyc differ diff --git a/dify_client_python/dify_client/__pycache__/errors.cpython-310.pyc b/dify_client_python/dify_client/__pycache__/errors.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c56794c2ea9c93639888f2d7d50e2b818036a8f5 Binary files /dev/null and b/dify_client_python/dify_client/__pycache__/errors.cpython-310.pyc differ diff --git a/dify_client_python/dify_client/__pycache__/errors.cpython-312.pyc b/dify_client_python/dify_client/__pycache__/errors.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6073057c3116b89ad59a3a368a217bed1836c55f Binary files /dev/null and b/dify_client_python/dify_client/__pycache__/errors.cpython-312.pyc differ diff --git a/dify_client_python/dify_client/_clientx.py b/dify_client_python/dify_client/_clientx.py new file mode 100644 index 0000000000000000000000000000000000000000..2a9277d6470ffad2731bee191cf54c6725230378 --- /dev/null +++ b/dify_client_python/dify_client/_clientx.py @@ -0,0 +1,694 @@
+import json as jsonlib  # aliased: the request methods below take a `json=` keyword that would shadow the module
+from typing import Optional, Any, Mapping, Iterator, AsyncIterator, Union, Dict
+
+try:
+    from enum import StrEnum
+except ImportError:
+    from strenum import StrEnum
+try:
+    from http import HTTPMethod
+except ImportError:
+    class HTTPMethod(StrEnum):
+        GET = "GET"
+        POST = "POST"
+        PUT = "PUT"
+        DELETE = "DELETE"
+
+import httpx
+# noinspection PyProtectedMember
+import httpx._types as types
+from httpx_sse import aconnect_sse  # used by AsyncClient.arequest_stream
+from pydantic import BaseModel
+
+from dify_client_python.dify_client import errors, models
+
+_httpx_client = httpx.Client()
+_async_httpx_client = httpx.AsyncClient()
+
+IGNORED_STREAM_EVENTS = (models.StreamEvent.PING.value,)
+
+# feedback
+ENDPOINT_FEEDBACKS = "/messages/{message_id}/feedbacks"
+# suggest
+ENDPOINT_SUGGESTED = "/messages/{message_id}/suggested"
+# files upload
+ENDPOINT_FILES_UPLOAD = "/files/upload"
+# completion
+ENDPOINT_COMPLETION_MESSAGES = "/completion-messages"
+ENDPOINT_STOP_COMPLETION_MESSAGES = "/completion-messages/{task_id}/stop"
+# chat
+ENDPOINT_CHAT_MESSAGES = "/chat-messages"
+ENDPOINT_STOP_CHAT_MESSAGES = "/chat-messages/{task_id}/stop"
+# workflow
+ENDPOINT_RUN_WORKFLOWS = "/workflows/run"
+ENDPOINT_STOP_WORKFLOWS = "/workflows/{task_id}/stop"
+# audio <-> text
+ENDPOINT_TEXT_TO_AUDIO = "/text-to-audio"
+ENDPOINT_AUDIO_TO_TEXT = "/audio-to-text"
+
+
+class ServerSentEvent:
+    """Minimal SSE event container yielded by the synchronous streaming path.
+
+    It mirrors the attributes of httpx_sse.ServerSentEvent (which the async
+    path yields), so both paths expose `.event`, `.data` and `.json()`.
+    """
+
+    def __init__(self, event: Optional[str], data: str,
+                 id: Optional[str] = None, retry: Optional[int] = None):
+        self.event = event
+        self.data = data
+        self.id = id
+        self.retry = retry
+
+    def json(self) -> Any:
+        return jsonlib.loads(self.data)
+
+
+class Client(BaseModel):
+    api_key: str
+    api_base: Optional[str] = "https://api.dify.ai/v1"
+
+    def request(self, endpoint: str, method: str,
+                content:
Optional[types.RequestContent] = None, + data: Optional[types.RequestData] = None, + files: Optional[types.RequestFiles] = None, + json: Optional[Any] = None, + params: Optional[types.QueryParamTypes] = None, + headers: Optional[Mapping[str, str]] = None, + **kwargs: object, + ) -> httpx.Response: + """ + Sends a synchronous HTTP request to the specified endpoint. + + Args: + endpoint: The API endpoint to send the request to. + method: The HTTP method to use (e.g., 'GET', 'POST'). + content: Raw content to include in the request body. + data: Form data to include in the request body. + files: Files to include in the request body. + json: JSON data to include in the request body. + params: Query parameters to include in the request URL. + headers: Additional headers to include in the request. + **kwargs: Extra keyword arguments to pass to the request function. + + Returns: + A `httpx.Response` object containing the HTTP response. + + Raises: + Various DifyAPIError exceptions if the response contains an error. + """ + merged_headers = {} + if headers: + merged_headers.update(headers) + self._prepare_auth_headers(merged_headers) + + response = _httpx_client.request(method, endpoint, content=content, data=data, files=files, json=json, + params=params, headers=merged_headers, **kwargs) + errors.raise_for_status(response) + return response + + def request_stream(self, endpoint: str, method: str, + content: Optional[types.RequestContent] = None, + data: Optional[types.RequestData] = None, + files: Optional[types.RequestFiles] = None, + json: Optional[Any] = None, + params: Optional[types.QueryParamTypes] = None, + headers: Optional[Mapping[str, str]] = None, + **kwargs, + ) -> Iterator[ServerSentEvent]: + """ + Opens a server-sent events (SSE) stream to the specified endpoint. + + Args: + endpoint: The API endpoint to send the request to. + method: The HTTP method to use (e.g., 'GET', 'POST'). + content: Raw content to include in the request body. + data: Form data to include in the request body. + files: Files to include in the request body. + json: JSON data to include in the request body. + params: Query parameters to include in the request URL. + headers: Additional headers to include in the request. + **kwargs: Extra keyword arguments to pass to the request function. + + Returns: + An iterator of `ServerSentEvent` objects representing the stream of events. + + Raises: + Various DifyAPIError exceptions if an error event is received in the stream. 
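+
+        Example (illustrative sketch; `url` and `payload` are assumed to be prepared by the caller):
+            for sse in client.request_stream(url, HTTPMethod.POST, json=payload):
+                print(sse.data)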
+ """ + merged_headers = { + 'Accept': 'text/event-stream', + 'Cache-Control': 'no-cache', + 'Connection': 'keep-alive' + } + if headers: + merged_headers.update(headers) + self._prepare_auth_headers(merged_headers) + + response = _httpx_client.stream( + method, + endpoint, + headers=merged_headers, + content=content, + data=data, + files=files, + json=json, + params=params, + **kwargs + ) + + with response as event_source: + if not _check_stream_content_type(event_source): + event_source.read() + errors.raise_for_status(event_source) + for line in event_source.iter_lines(): + if not line: + continue + if line.startswith(b'data: '): + data = line[6:].decode('utf-8') + try: + json_data = json.loads(data) + event = { + 'event': json_data.get('event'), + 'data': data, + 'id': None, + 'retry': None + } + if event['event'] in IGNORED_STREAM_EVENTS: + continue + yield event + except json.JSONDecodeError: + continue + + def feedback_messages(self, message_id: str, req: models.FeedbackRequest, **kwargs) -> models.FeedbackResponse: + """ + Submits feedback for a specific message. + + Args: + message_id: The identifier of the message to submit feedback for. + req: A `FeedbackRequest` object containing the feedback details, such as the rating. + **kwargs: Extra keyword arguments to pass to the request function. + + Returns: + A `FeedbackResponse` object containing the result of the feedback submission. + """ + response = self.request( + self._prepare_url(ENDPOINT_FEEDBACKS, message_id=message_id), + HTTPMethod.POST, + json=req.model_dump(), + **kwargs, + ) + return models.FeedbackResponse(**response.json()) + + def suggest_messages(self, message_id: str, req: models.ChatSuggestRequest, **kwargs) -> models.ChatSuggestResponse: + """ + Retrieves suggested messages based on a specific message. + + Args: + message_id: The identifier of the message to get suggestions for. + req: A `ChatSuggestRequest` object containing the request details. + **kwargs: Extra keyword arguments to pass to the request function. + + Returns: + A `ChatSuggestResponse` object containing suggested messages. + """ + response = self.request( + self._prepare_url(ENDPOINT_SUGGESTED, message_id=message_id), + HTTPMethod.GET, + params=req.model_dump(), + **kwargs, + ) + return models.ChatSuggestResponse(**response.json()) + + def upload_files(self, file: types.FileTypes, req: models.UploadFileRequest, + **kwargs) -> models.UploadFileResponse: + """ + Uploads a file to be used in subsequent requests. + + Args: + file: The file to upload. This can be a file-like object, or a tuple of + (`filename`, file-like object, mime_type). + req: An `UploadFileRequest` object containing the upload details, such as the user who is uploading. + **kwargs: Extra keyword arguments to pass to the request function. + + Returns: + An `UploadFileResponse` object containing details about the uploaded file, such as its identifier and URL. + """ + response = self.request( + self._prepare_url(ENDPOINT_FILES_UPLOAD), + HTTPMethod.POST, + data=req.model_dump(), + files=[("file", file)], + **kwargs, + ) + return models.UploadFileResponse(**response.json()) + + def completion_messages(self, req: models.CompletionRequest, **kwargs) \ + -> Union[models.CompletionResponse, Iterator[models.CompletionStreamResponse]]: + """ + Sends a request to generate a completion or a series of completions based on the provided input. + + Returns: + If the response mode is blocking, it returns a `CompletionResponse` object containing the generated message. 
+ If the response mode is streaming, it returns an iterator of `CompletionStreamResponse` objects containing + the stream of generated events. + """ + if req.response_mode == models.ResponseMode.BLOCKING: + return self._completion_messages(req, **kwargs) + if req.response_mode == models.ResponseMode.STREAMING: + return self._completion_messages_stream(req, **kwargs) + raise ValueError(f"Invalid request_mode: {req.response_mode}") + + def _completion_messages(self, req: models.CompletionRequest, **kwargs) -> models.CompletionResponse: + response = self.request( + self._prepare_url(ENDPOINT_COMPLETION_MESSAGES), + HTTPMethod.POST, + json=req.model_dump(), + **kwargs, + ) + return models.CompletionResponse(**response.json()) + + def _completion_messages_stream(self, req: models.CompletionRequest, **kwargs) \ + -> Iterator[models.CompletionStreamResponse]: + event_source = self.request_stream( + self._prepare_url(ENDPOINT_COMPLETION_MESSAGES), + HTTPMethod.POST, + json=req.model_dump(), + **kwargs, + ) + for sse in event_source: + yield models.build_completion_stream_response(sse.json()) + + def stop_completion_messages(self, task_id: str, req: models.StopRequest, **kwargs) -> models.StopResponse: + """ + Sends a request to stop a streaming completion task. + + Returns: + A `StopResponse` object indicating the success of the operation. + """ + return self._stop_stream(self._prepare_url(ENDPOINT_STOP_COMPLETION_MESSAGES, task_id=task_id), req, **kwargs) + + def chat_messages(self, req: models.ChatRequest, **kwargs) \ + -> Union[models.ChatResponse, Iterator[models.ChatStreamResponse]]: + """ + Sends a request to generate a chat message or a series of chat messages based on the provided input. + + Returns: + If the response mode is blocking, it returns a `ChatResponse` object containing the generated chat message. + If the response mode is streaming, it returns an iterator of `ChatStreamResponse` objects containing the + stream of chat events. + """ + if req.response_mode == models.ResponseMode.BLOCKING: + return self._chat_messages(req, **kwargs) + if req.response_mode == models.ResponseMode.STREAMING: + return self._chat_messages_stream(req, **kwargs) + raise ValueError(f"Invalid request_mode: {req.response_mode}") + + def _chat_messages(self, req: models.ChatRequest, **kwargs) -> models.ChatResponse: + response = self.request( + self._prepare_url(ENDPOINT_CHAT_MESSAGES), + HTTPMethod.POST, + json=req.model_dump(), + **kwargs, + ) + return models.ChatResponse(**response.json()) + + def _chat_messages_stream(self, req: models.ChatRequest, **kwargs) -> Iterator[models.ChatStreamResponse]: + event_source = self.request_stream( + self._prepare_url(ENDPOINT_CHAT_MESSAGES), + HTTPMethod.POST, + json=req.model_dump(), + **kwargs, + ) + for sse in event_source: + yield models.build_chat_stream_response(sse.json()) + + def stop_chat_messages(self, task_id: str, req: models.StopRequest, **kwargs) -> models.StopResponse: + """ + Sends a request to stop a streaming chat task. + + Returns: + A `StopResponse` object indicating the success of the operation. + """ + return self._stop_stream(self._prepare_url(ENDPOINT_STOP_CHAT_MESSAGES, task_id=task_id), req, **kwargs) + + def run_workflows(self, req: models.WorkflowsRunRequest, **kwargs) \ + -> Union[models.WorkflowsRunResponse, Iterator[models.WorkflowsRunStreamResponse]]: + """ + Initiates the execution of a workflow, which can consist of multiple steps and actions. 
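+        In streaming mode, the run reports workflow_started, node_started, node_finished and
+        workflow_finished events as it progresses (see the event-to-model mapping in models/stream.py).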
+ + Returns: + If the response mode is blocking, it returns a `WorkflowsRunResponse` object containing the results of the + completed workflow. + If the response mode is streaming, it returns an iterator of `WorkflowsRunStreamResponse` objects + containing the stream of workflow events. + """ + if req.response_mode == models.ResponseMode.BLOCKING: + return self._run_workflows(req, **kwargs) + if req.response_mode == models.ResponseMode.STREAMING: + return self._run_workflows_stream(req, **kwargs) + raise ValueError(f"Invalid request_mode: {req.response_mode}") + + def _run_workflows(self, req: models.WorkflowsRunRequest, **kwargs) -> models.WorkflowsRunResponse: + response = self.request( + self._prepare_url(ENDPOINT_RUN_WORKFLOWS), + HTTPMethod.POST, + json=req.model_dump(), + **kwargs, + ) + return models.WorkflowsRunResponse(**response.json()) + + def _run_workflows_stream(self, req: models.WorkflowsRunRequest, **kwargs) \ + -> Iterator[models.WorkflowsRunStreamResponse]: + event_source = self.request_stream( + self._prepare_url(ENDPOINT_RUN_WORKFLOWS), + HTTPMethod.POST, + json=req.model_dump(), + **kwargs, + ) + for sse in event_source: + yield models.build_workflows_stream_response(sse.json()) + + def stop_workflows(self, task_id: str, req: models.StopRequest, **kwargs) -> models.StopResponse: + """ + Sends a request to stop a streaming workflow task. + + Returns: + A `StopResponse` object indicating the success of the operation. + """ + return self._stop_stream(self._prepare_url(ENDPOINT_STOP_WORKFLOWS, task_id=task_id), req, **kwargs) + + def _stop_stream(self, endpoint: str, req: models.StopRequest, **kwargs) -> models.StopResponse: + response = self.request( + endpoint, + HTTPMethod.POST, + json=req.model_dump(), + **kwargs, + ) + return models.StopResponse(**response.json()) + + def _prepare_url(self, endpoint: str, **kwargs) -> str: + return self.api_base + endpoint.format(**kwargs) + + def _prepare_auth_headers(self, headers: Dict[str, str]): + if "authorization" not in (key.lower() for key in headers.keys()): + headers["Authorization"] = f"Bearer {self.api_key}" + + +class AsyncClient(BaseModel): + api_key: str + api_base: Optional[str] = "https://api.dify.ai/v1" + + async def arequest(self, endpoint: str, method: str, + content: Optional[types.RequestContent] = None, + data: Optional[types.RequestData] = None, + files: Optional[types.RequestFiles] = None, + json: Optional[Any] = None, + params: Optional[types.QueryParamTypes] = None, + headers: Optional[Mapping[str, str]] = None, + **kwargs, + ) -> httpx.Response: + """ + Asynchronously sends a request to the specified Dify API endpoint. + + Args: + endpoint: The endpoint URL to which the request is sent. + method: The HTTP method to be used for the request (e.g., 'GET', 'POST'). + content: Raw content to include in the request body, if any. + data: Form data to be sent in the request body. + files: Files to be uploaded with the request. + json: JSON data to be sent in the request body. + params: Query parameters to be included in the request URL. + headers: Additional headers to be sent with the request. + **kwargs: Extra keyword arguments to be passed to the underlying HTTPX request function. + + Returns: + A httpx.Response object containing the server's response to the HTTP request. + + Raises: + Various DifyAPIError exceptions if the response contains an error. 
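+
+        Example (illustrative sketch):
+            response = await client.arequest(url, HTTPMethod.POST, json={"user": "user-123"})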
+ """ + merged_headers = {} + if headers: + merged_headers.update(headers) + self._prepare_auth_headers(merged_headers) + + response = await _async_httpx_client.request(method, endpoint, content=content, data=data, files=files, + json=json, params=params, headers=merged_headers, **kwargs) + errors.raise_for_status(response) + return response + + async def arequest_stream(self, endpoint: str, method: str, + content: Optional[types.RequestContent] = None, + data: Optional[types.RequestData] = None, + files: Optional[types.RequestFiles] = None, + json: Optional[Any] = None, + params: Optional[types.QueryParamTypes] = None, + headers: Optional[Mapping[str, str]] = None, + **kwargs, + ) -> AsyncIterator[ServerSentEvent]: + """ + Asynchronously establishes a streaming connection to the specified Dify API endpoint. + + Args: + endpoint: The endpoint URL to which the request is sent. + method: The HTTP method to be used for the request (e.g., 'GET', 'POST'). + content: Raw content to include in the request body, if any. + data: Form data to be sent in the request body. + files: Files to be uploaded with the request. + json: JSON data to be sent in the request body. + params: Query parameters to be included in the request URL. + headers: Additional headers to be sent with the request. + **kwargs: Extra keyword arguments to be passed to the underlying HTTPX request function. + + Yields: + ServerSentEvent objects representing the events received from the server. + + Raises: + Various DifyAPIError exceptions if an error event is received in the stream. + """ + merged_headers = {} + if headers: + merged_headers.update(headers) + self._prepare_auth_headers(merged_headers) + + async with aconnect_sse(_async_httpx_client, method, endpoint, headers=merged_headers, + content=content, data=data, files=files, json=json, params=params, + **kwargs) as event_source: + if not _check_stream_content_type(event_source.response): + await event_source.response.aread() + errors.raise_for_status(event_source.response) + async for sse in event_source.aiter_sse(): + errors.raise_for_status(sse) + if sse.event in IGNORED_STREAM_EVENTS or sse.data in IGNORED_STREAM_EVENTS: + continue + yield sse + + async def afeedback_messages(self, message_id: str, req: models.FeedbackRequest, **kwargs) \ + -> models.FeedbackResponse: + """ + Submits feedback for a specific message. + + Args: + message_id: The identifier of the message to submit feedback for. + req: A `FeedbackRequest` object containing the feedback details, such as the rating. + **kwargs: Extra keyword arguments to pass to the request function. + + Returns: + A `FeedbackResponse` object containing the result of the feedback submission. + """ + response = await self.arequest( + self._prepare_url(ENDPOINT_FEEDBACKS, message_id=message_id), + HTTPMethod.POST, + json=req.model_dump(), + **kwargs, + ) + return models.FeedbackResponse(**response.json()) + + async def asuggest_messages(self, message_id: str, req: models.ChatSuggestRequest, **kwargs) \ + -> models.ChatSuggestResponse: + """ + Retrieves suggested messages based on a specific message. + + Args: + message_id: The identifier of the message to get suggestions for. + req: A `ChatSuggestRequest` object containing the request details. + **kwargs: Extra keyword arguments to pass to the request function. + + Returns: + A `ChatSuggestResponse` object containing suggested messages. 
+ """ + response = await self.arequest( + self._prepare_url(ENDPOINT_SUGGESTED, message_id=message_id), + HTTPMethod.GET, + params=req.model_dump(), + **kwargs, + ) + return models.ChatSuggestResponse(**response.json()) + + async def aupload_files(self, file: types.FileTypes, req: models.UploadFileRequest, **kwargs) \ + -> models.UploadFileResponse: + """ + Uploads a file to be used in subsequent requests. + + Args: + file: The file to upload. This can be a file-like object, or a tuple of + (`filename`, file-like object, mime_type). + req: An `UploadFileRequest` object containing the upload details, such as the user who is uploading. + **kwargs: Extra keyword arguments to pass to the request function. + + Returns: + An `UploadFileResponse` object containing details about the uploaded file, such as its identifier and URL. + """ + response = await self.arequest( + self._prepare_url(ENDPOINT_FILES_UPLOAD), + HTTPMethod.POST, + data=req.model_dump(), + files=[("file", file)], + **kwargs, + ) + return models.UploadFileResponse(**response.json()) + + async def acompletion_messages(self, req: models.CompletionRequest, **kwargs) \ + -> Union[models.CompletionResponse, AsyncIterator[models.CompletionStreamResponse]]: + """ + Sends a request to generate a completion or a series of completions based on the provided input. + + Returns: + If the response mode is blocking, it returns a `CompletionResponse` object containing the generated message. + If the response mode is streaming, it returns an iterator of `CompletionStreamResponse` objects containing + the stream of generated events. + """ + if req.response_mode == models.ResponseMode.BLOCKING: + return await self._acompletion_messages(req, **kwargs) + if req.response_mode == models.ResponseMode.STREAMING: + return self._acompletion_messages_stream(req, **kwargs) + raise ValueError(f"Invalid request_mode: {req.response_mode}") + + async def _acompletion_messages(self, req: models.CompletionRequest, **kwargs) -> models.CompletionResponse: + response = await self.arequest( + self._prepare_url(ENDPOINT_COMPLETION_MESSAGES), + HTTPMethod.POST, + json=req.model_dump(), + **kwargs, + ) + return models.CompletionResponse(**response.json()) + + async def _acompletion_messages_stream(self, req: models.CompletionRequest, **kwargs) \ + -> AsyncIterator[models.CompletionStreamResponse]: + async for sse in self.arequest_stream( + self._prepare_url(ENDPOINT_COMPLETION_MESSAGES), + HTTPMethod.POST, + json=req.model_dump(), + **kwargs): + yield models.build_completion_stream_response(sse.json()) + + async def astop_completion_messages(self, task_id: str, req: models.StopRequest, **kwargs) -> models.StopResponse: + """ + Sends a request to stop a streaming completion task. + + Returns: + A `StopResponse` object indicating the success of the operation. + """ + return await self._astop_stream( + self._prepare_url(ENDPOINT_STOP_COMPLETION_MESSAGES, task_id=task_id), req, **kwargs) + + async def achat_messages(self, req: models.ChatRequest, **kwargs) \ + -> Union[models.ChatResponse, AsyncIterator[models.ChatStreamResponse]]: + """ + Sends a request to generate a chat message or a series of chat messages based on the provided input. + + Returns: + If the response mode is blocking, it returns a `ChatResponse` object containing the generated chat message. + If the response mode is streaming, it returns an iterator of `ChatStreamResponse` objects containing the + stream of chat events. 
+ """ + if req.response_mode == models.ResponseMode.BLOCKING: + return await self._achat_messages(req, **kwargs) + if req.response_mode == models.ResponseMode.STREAMING: + return self._achat_messages_stream(req, **kwargs) + raise ValueError(f"Invalid request_mode: {req.response_mode}") + + async def _achat_messages(self, req: models.ChatRequest, **kwargs) -> models.ChatResponse: + response = await self.arequest( + self._prepare_url(ENDPOINT_CHAT_MESSAGES), + HTTPMethod.POST, + json=req.model_dump(), + **kwargs, + ) + return models.ChatResponse(**response.json()) + + async def _achat_messages_stream(self, req: models.ChatRequest, **kwargs) \ + -> AsyncIterator[models.ChatStreamResponse]: + async for sse in self.arequest_stream( + self._prepare_url(ENDPOINT_CHAT_MESSAGES), + HTTPMethod.POST, + json=req.model_dump(), + **kwargs): + yield models.build_chat_stream_response(sse.json()) + + async def astop_chat_messages(self, task_id: str, req: models.StopRequest, **kwargs) -> models.StopResponse: + """ + Sends a request to stop a streaming chat task. + + Returns: + A `StopResponse` object indicating the success of the operation. + """ + return await self._astop_stream(self._prepare_url(ENDPOINT_STOP_CHAT_MESSAGES, task_id=task_id), req, **kwargs) + + async def arun_workflows(self, req: models.WorkflowsRunRequest, **kwargs) \ + -> Union[models.WorkflowsRunResponse, AsyncIterator[models.WorkflowsStreamResponse]]: + """ + Initiates the execution of a workflow, which can consist of multiple steps and actions. + + Returns: + If the response mode is blocking, it returns a `WorkflowsRunResponse` object containing the results of the + completed workflow. + If the response mode is streaming, it returns an iterator of `WorkflowsRunStreamResponse` objects + containing the stream of workflow events. + """ + if req.response_mode == models.ResponseMode.BLOCKING: + return await self._arun_workflows(req, **kwargs) + if req.response_mode == models.ResponseMode.STREAMING: + return self._arun_workflows_stream(req, **kwargs) + raise ValueError(f"Invalid request_mode: {req.response_mode}") + + async def _arun_workflows(self, req: models.WorkflowsRunRequest, **kwargs) -> models.WorkflowsRunResponse: + response = await self.arequest( + self._prepare_url(ENDPOINT_RUN_WORKFLOWS), + HTTPMethod.POST, + json=req.model_dump(), + **kwargs, + ) + return models.WorkflowsRunResponse(**response.json()) + + async def _arun_workflows_stream(self, req: models.WorkflowsRunRequest, **kwargs) \ + -> AsyncIterator[models.WorkflowsRunStreamResponse]: + async for sse in self.arequest_stream( + self._prepare_url(ENDPOINT_RUN_WORKFLOWS), + HTTPMethod.POST, + json=req.model_dump(), + **kwargs): + yield models.build_workflows_stream_response(sse.json()) + + async def astop_workflows(self, task_id: str, req: models.StopRequest, **kwargs) -> models.StopResponse: + """ + Sends a request to stop a streaming workflow task. + + Returns: + A `StopResponse` object indicating the success of the operation. 
+ """ + return await self._astop_stream(self._prepare_url(ENDPOINT_STOP_WORKFLOWS, task_id=task_id), req, **kwargs) + + async def _astop_stream(self, endpoint: str, req: models.StopRequest, **kwargs) -> models.StopResponse: + response = await self.arequest( + endpoint, + HTTPMethod.POST, + json=req.model_dump(), + **kwargs, + ) + return models.StopResponse(**response.json()) + + def _prepare_url(self, endpoint: str, **kwargs) -> str: + return self.api_base + endpoint.format(**kwargs) + + def _prepare_auth_headers(self, headers: Dict[str, str]): + if "authorization" not in (key.lower() for key in headers.keys()): + headers["Authorization"] = f"Bearer {self.api_key}" + + +def _get_content_type(headers: httpx.Headers) -> str: + return headers.get("content-type", "").partition(";")[0] + + +def _check_stream_content_type(response: httpx.Response) -> bool: + content_type = _get_content_type(response.headers) + return response.is_success and "text/event-stream" in content_type diff --git a/dify_client_python/dify_client/errors.py b/dify_client_python/dify_client/errors.py new file mode 100644 index 0000000000000000000000000000000000000000..a51ea3a90f6cbf5cd2c6717e59330cbb0ee75ea3 --- /dev/null +++ b/dify_client_python/dify_client/errors.py @@ -0,0 +1,134 @@ +from http import HTTPStatus +from typing import Union + +import httpx +from pydantic import BaseModel + +from dify_client_python.dify_client import models + + +class DifyAPIError(Exception): + def __init__(self, status: int, code: str, message: str): + super().__init__(f"status_code={status}, code={code}, {message}") + self.status = status + self.code = code + self.message = message + + +class DifyInvalidParam(DifyAPIError): + pass + + +class DifyNotChatApp(DifyAPIError): + pass + + +class DifyResourceNotFound(DifyAPIError): + pass + + +class DifyAppUnavailable(DifyAPIError): + pass + + +class DifyProviderNotInitialize(DifyAPIError): + pass + + +class DifyProviderQuotaExceeded(DifyAPIError): + pass + + +class DifyModelCurrentlyNotSupport(DifyAPIError): + pass + + +class DifyCompletionRequestError(DifyAPIError): + pass + + +class DifyInternalServerError(DifyAPIError): + pass + + +class DifyNoFileUploaded(DifyAPIError): + pass + + +class DifyTooManyFiles(DifyAPIError): + pass + + +class DifyUnsupportedPreview(DifyAPIError): + pass + + +class DifyUnsupportedEstimate(DifyAPIError): + pass + + +class DifyFileTooLarge(DifyAPIError): + pass + + +class DifyUnsupportedFileType(DifyAPIError): + pass + + +class DifyS3ConnectionFailed(DifyAPIError): + pass + + +class DifyS3PermissionDenied(DifyAPIError): + pass + + +class DifyS3FileTooLarge(DifyAPIError): + pass + + +SPEC_CODE_ERRORS = { + # completion & chat & workflow + "invalid_param": DifyInvalidParam, + "not_chat_app": DifyNotChatApp, + "app_unavailable": DifyAppUnavailable, + "provider_not_initialize": DifyProviderNotInitialize, + "provider_quota_exceeded": DifyProviderQuotaExceeded, + "model_currently_not_support": DifyModelCurrentlyNotSupport, + "completion_request_error": DifyCompletionRequestError, + # files upload + "no_file_uploaded": DifyNoFileUploaded, + "too_many_files": DifyTooManyFiles, + "unsupported_preview": DifyUnsupportedPreview, + "unsupported_estimate": DifyUnsupportedEstimate, + "file_too_large": DifyFileTooLarge, + "unsupported_file_type": DifyUnsupportedFileType, + "s3_connection_failed": DifyS3ConnectionFailed, + "s3_permission_denied": DifyS3PermissionDenied, + "s3_file_too_large": DifyS3FileTooLarge, +} + + +def raise_for_status(response: Union[httpx.Response, BaseModel]): + 
if isinstance(response, httpx.Response):
+        if response.is_success:
+            return
+        json = response.json()
+        if "status" not in json:
+            json["status"] = response.status_code
+        details = models.ErrorResponse(**json)
+    elif isinstance(response, BaseModel):
+        if getattr(response, 'event', None) != models.StreamEvent.ERROR.value:
+            return
+        details = models.ErrorStreamResponse(**response.model_dump())
+    elif hasattr(response, 'event') and hasattr(response, 'json'):
+        # Duck-typed branch for the SSE event objects yielded by the streaming
+        # paths (e.g. httpx_sse.ServerSentEvent), which are neither an
+        # httpx.Response nor a pydantic BaseModel.
+        if response.event != models.StreamEvent.ERROR.value:
+            return
+        details = models.ErrorStreamResponse(**response.json())
+    else:
+        raise ValueError(f"Invalid dify response type: {type(response)}")
+
+    if details.status == HTTPStatus.NOT_FOUND:
+        raise DifyResourceNotFound(details.status, details.code, details.message)
+    elif details.status == HTTPStatus.INTERNAL_SERVER_ERROR:
+        raise DifyInternalServerError(details.status, details.code, details.message)
+    else:
+        raise SPEC_CODE_ERRORS.get(details.code, DifyAPIError)(
+            details.status, details.code, details.message
+        ) diff --git a/dify_client_python/dify_client/models/__init__.py b/dify_client_python/dify_client/models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9a9aa5fb121b51222c290f4af7fcb3ea3fe070e7 --- /dev/null +++ b/dify_client_python/dify_client/models/__init__.py @@ -0,0 +1,7 @@ +from .chat import * +from .completion import * +from .feedback import * +from .file import * +from .workflow import * +from .stream import * +from .base import StopRequest, StopResponse diff --git a/dify_client_python/dify_client/models/__pycache__/__init__.cpython-310.pyc b/dify_client_python/dify_client/models/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9cf467180a1d1723df8c37903ef402fb9c3746a5 Binary files /dev/null and b/dify_client_python/dify_client/models/__pycache__/__init__.cpython-310.pyc differ diff --git a/dify_client_python/dify_client/models/__pycache__/__init__.cpython-312.pyc b/dify_client_python/dify_client/models/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7ab2362ee187060937db625369eddcc89f1c2fa8 Binary files /dev/null and b/dify_client_python/dify_client/models/__pycache__/__init__.cpython-312.pyc differ diff --git a/dify_client_python/dify_client/models/__pycache__/base.cpython-310.pyc b/dify_client_python/dify_client/models/__pycache__/base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4df7a905ea04cc372075a26ea5f00a6e3d9dcb3b Binary files /dev/null and b/dify_client_python/dify_client/models/__pycache__/base.cpython-310.pyc differ diff --git a/dify_client_python/dify_client/models/__pycache__/base.cpython-312.pyc b/dify_client_python/dify_client/models/__pycache__/base.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f71136a49b431aa29b1b20ca61207b9340b2474d Binary files /dev/null and b/dify_client_python/dify_client/models/__pycache__/base.cpython-312.pyc differ diff --git a/dify_client_python/dify_client/models/__pycache__/chat.cpython-310.pyc b/dify_client_python/dify_client/models/__pycache__/chat.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..939a0fc6323ab3cc005f8a2c8176047c880f8f10 Binary files /dev/null and b/dify_client_python/dify_client/models/__pycache__/chat.cpython-310.pyc differ diff --git a/dify_client_python/dify_client/models/__pycache__/chat.cpython-312.pyc b/dify_client_python/dify_client/models/__pycache__/chat.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d0778beb2eb4be22865861cd155407b2493b03f5 Binary files /dev/null and
b/dify_client_python/dify_client/models/__pycache__/chat.cpython-312.pyc differ diff --git a/dify_client_python/dify_client/models/__pycache__/completion.cpython-310.pyc b/dify_client_python/dify_client/models/__pycache__/completion.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eee290918b5f46082a164843166a00185936ad0e Binary files /dev/null and b/dify_client_python/dify_client/models/__pycache__/completion.cpython-310.pyc differ diff --git a/dify_client_python/dify_client/models/__pycache__/completion.cpython-312.pyc b/dify_client_python/dify_client/models/__pycache__/completion.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8801fdd9130937fed2dac29078aa0631887ce67b Binary files /dev/null and b/dify_client_python/dify_client/models/__pycache__/completion.cpython-312.pyc differ diff --git a/dify_client_python/dify_client/models/__pycache__/feedback.cpython-310.pyc b/dify_client_python/dify_client/models/__pycache__/feedback.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..44351a38316d8891e3dd85ccdac3b5a20f05cbe1 Binary files /dev/null and b/dify_client_python/dify_client/models/__pycache__/feedback.cpython-310.pyc differ diff --git a/dify_client_python/dify_client/models/__pycache__/feedback.cpython-312.pyc b/dify_client_python/dify_client/models/__pycache__/feedback.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e481e82b761c13e0f00dc275adbb8ebc2bc0ad43 Binary files /dev/null and b/dify_client_python/dify_client/models/__pycache__/feedback.cpython-312.pyc differ diff --git a/dify_client_python/dify_client/models/__pycache__/file.cpython-310.pyc b/dify_client_python/dify_client/models/__pycache__/file.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7e5d067b67297a32de2e1b8c20bf3715a197bd15 Binary files /dev/null and b/dify_client_python/dify_client/models/__pycache__/file.cpython-310.pyc differ diff --git a/dify_client_python/dify_client/models/__pycache__/file.cpython-312.pyc b/dify_client_python/dify_client/models/__pycache__/file.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b75438c51cd9df6059de2d0ebd09e34556f4787a Binary files /dev/null and b/dify_client_python/dify_client/models/__pycache__/file.cpython-312.pyc differ diff --git a/dify_client_python/dify_client/models/__pycache__/stream.cpython-310.pyc b/dify_client_python/dify_client/models/__pycache__/stream.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eb9688bd78044940d564c3ce76fa7d20b6e22906 Binary files /dev/null and b/dify_client_python/dify_client/models/__pycache__/stream.cpython-310.pyc differ diff --git a/dify_client_python/dify_client/models/__pycache__/stream.cpython-312.pyc b/dify_client_python/dify_client/models/__pycache__/stream.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1311d5b0a8d97c73aaf94e17b497933398504303 Binary files /dev/null and b/dify_client_python/dify_client/models/__pycache__/stream.cpython-312.pyc differ diff --git a/dify_client_python/dify_client/models/__pycache__/workflow.cpython-310.pyc b/dify_client_python/dify_client/models/__pycache__/workflow.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..68320c9b7d64d11f5297c78651a02237d9253436 Binary files /dev/null and b/dify_client_python/dify_client/models/__pycache__/workflow.cpython-310.pyc differ diff --git 
a/dify_client_python/dify_client/models/__pycache__/workflow.cpython-312.pyc b/dify_client_python/dify_client/models/__pycache__/workflow.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a144cfd959031683ee9145c31576caef3355ce80 Binary files /dev/null and b/dify_client_python/dify_client/models/__pycache__/workflow.cpython-312.pyc differ diff --git a/dify_client_python/dify_client/models/base.py b/dify_client_python/dify_client/models/base.py new file mode 100644 index 0000000000000000000000000000000000000000..1b1fd9c5abd0d4a4349cd368b6c88210e80a3431 --- /dev/null +++ b/dify_client_python/dify_client/models/base.py @@ -0,0 +1,93 @@ +try: + from enum import StrEnum +except ImportError: + from strenum import StrEnum +from http import HTTPStatus +from typing import Optional, List + +from pydantic import BaseModel, ConfigDict + + +class Mode(StrEnum): + CHAT = "chat" + COMPLETION = "completion" + + +class ResponseMode(StrEnum): + STREAMING = 'streaming' + BLOCKING = 'blocking' + + +class FileType(StrEnum): + IMAGE = "image" + + +class TransferMethod(StrEnum): + REMOTE_URL = "remote_url" + LOCAL_FILE = "local_file" + + +# Allows the entry of various variable values defined by the App. +# The inputs parameter contains multiple key/value pairs, with each key corresponding to a specific variable and +# each value being the specific value for that variable. +# The text generation application requires at least one key/value pair to be inputted. +class CompletionInputs(BaseModel): + model_config = ConfigDict(extra='allow') + # Required The input text, the content to be processed. + query: str + + +class File(BaseModel): + type: FileType + transfer_method: TransferMethod + url: Optional[str] + # Uploaded file ID, which must be obtained by uploading through the File Upload API in advance + # (when the transfer method is local_file) + upload_file_id: Optional[str] + + +class Usage(BaseModel): + prompt_tokens: int + completion_tokens: int + total_tokens: int + + prompt_unit_price: str + prompt_price_unit: str + prompt_price: str + completion_unit_price: str + completion_price_unit: str + completion_price: str + total_price: str + currency: str + + latency: float + + +class RetrieverResource(BaseModel): + position: int + dataset_id: str + dataset_name: str + document_id: str + document_name: str + segment_id: str + score: float + content: str + + +class Metadata(BaseModel): + usage: Usage + retriever_resources: List[RetrieverResource] = [] + + +class StopRequest(BaseModel): + user: str + + +class StopResponse(BaseModel): + result: str # success + + +class ErrorResponse(BaseModel): + status: int = HTTPStatus.INTERNAL_SERVER_ERROR # HTTP status code + code: str = "" + message: str = "" diff --git a/dify_client_python/dify_client/models/chat.py b/dify_client_python/dify_client/models/chat.py new file mode 100644 index 0000000000000000000000000000000000000000..7adaabf1e2ce6d589e8787adf246376f0caa9c39 --- /dev/null +++ b/dify_client_python/dify_client/models/chat.py @@ -0,0 +1,29 @@ +from typing import Dict, List, Optional, Any + +from pydantic import BaseModel, Field + +from dify_client_python.dify_client.models.base import ResponseMode, File +from dify_client_python.dify_client.models.completion import CompletionResponse + + +class ChatRequest(BaseModel): + query: str + inputs: Dict[str, Any] = Field(default_factory=dict) + response_mode: ResponseMode + user: str + conversation_id: Optional[str] = "" + files: List[File] = [] + auto_generate_name: bool = True + + +class 
ChatResponse(CompletionResponse): + pass + + +class ChatSuggestRequest(BaseModel): + user: str + + +class ChatSuggestResponse(BaseModel): + result: str + data: List[str] = [] diff --git a/dify_client_python/dify_client/models/completion.py b/dify_client_python/dify_client/models/completion.py new file mode 100644 index 0000000000000000000000000000000000000000..b7ed0be85ee41d99376f00d5e34afeb737f4824b --- /dev/null +++ b/dify_client_python/dify_client/models/completion.py @@ -0,0 +1,22 @@ +from typing import Optional, List + +from pydantic import BaseModel + +from dify_client_python.dify_client.models.base import CompletionInputs, ResponseMode, File, Metadata, Mode + + +class CompletionRequest(BaseModel): + inputs: CompletionInputs + response_mode: ResponseMode + user: str + conversation_id: Optional[str] = "" + files: List[File] = [] + + +class CompletionResponse(BaseModel): + message_id: str + conversation_id: Optional[str] = "" + mode: Mode + answer: str + metadata: Metadata + created_at: int # unix timestamp seconds diff --git a/dify_client_python/dify_client/models/feedback.py b/dify_client_python/dify_client/models/feedback.py new file mode 100644 index 0000000000000000000000000000000000000000..aae0242dc054f728db4e6f0a55b0e6d80b2ae0dd --- /dev/null +++ b/dify_client_python/dify_client/models/feedback.py @@ -0,0 +1,21 @@ +try: + from enum import StrEnum +except ImportError: + from strenum import StrEnum +from typing import Optional + +from pydantic import BaseModel + + +class Rating(StrEnum): + LIKE = "like" + DISLIKE = "dislike" + + +class FeedbackRequest(BaseModel): + rating: Optional[Rating] = None + user: str + + +class FeedbackResponse(BaseModel): + result: str # success diff --git a/dify_client_python/dify_client/models/file.py b/dify_client_python/dify_client/models/file.py new file mode 100644 index 0000000000000000000000000000000000000000..9788f7ab867ba886aec59e414875a222512d3bf2 --- /dev/null +++ b/dify_client_python/dify_client/models/file.py @@ -0,0 +1,15 @@ +from pydantic import BaseModel + + +class UploadFileRequest(BaseModel): + user: str + + +class UploadFileResponse(BaseModel): + id: str + name: str + size: int + extension: str + mime_type: str + created_by: str # created by user + created_at: int # unix timestamp seconds diff --git a/dify_client_python/dify_client/models/stream.py b/dify_client_python/dify_client/models/stream.py new file mode 100644 index 0000000000000000000000000000000000000000..92632a37658355cd8dbe16684821796521140be1 --- /dev/null +++ b/dify_client_python/dify_client/models/stream.py @@ -0,0 +1,186 @@ +try: + from enum import StrEnum +except ImportError: + from strenum import StrEnum +from typing import Union, Optional, List + +from pydantic import BaseModel, ConfigDict, field_validator + +from dify_client_python.dify_client import utils +from dify_client_python.dify_client.models.base import Metadata, ErrorResponse +from dify_client_python.dify_client.models.workflow import WorkflowStartedData, WorkflowFinishedData, NodeStartedData, NodeFinishedData + +STREAM_EVENT_KEY = "event" + + +class StreamEvent(StrEnum): + MESSAGE = "message" + AGENT_MESSAGE = "agent_message" + AGENT_THOUGHT = "agent_thought" + MESSAGE_FILE = "message_file" # need to show file + WORKFLOW_STARTED = "workflow_started" + NODE_STARTED = "node_started" + NODE_FINISHED = "node_finished" + WORKFLOW_FINISHED = "workflow_finished" + MESSAGE_END = "message_end" + MESSAGE_REPLACE = "message_replace" + ERROR = "error" + PING = "ping" + + @classmethod + def new(cls, event: 
Union["StreamEvent", str]) -> "StreamEvent": + if isinstance(event, cls): + return event + return utils.str_to_enum(cls, event) + + +class StreamResponse(BaseModel): + model_config = ConfigDict(extra='allow') + + event: StreamEvent + task_id: Optional[str] = "" + + @field_validator("event", mode="before") + def transform_stream_event(cls, event: Union[StreamEvent, str]) -> StreamEvent: + return StreamEvent.new(event) + + +class PingResponse(StreamResponse): + pass + + +class ErrorStreamResponse(StreamResponse, ErrorResponse): + message_id: Optional[str] = "" + + +class MessageStreamResponse(StreamResponse): + message_id: str + conversation_id: Optional[str] = "" + answer: str + created_at: int # unix timestamp seconds + + +class MessageEndStreamResponse(StreamResponse): + message_id: str + conversation_id: Optional[str] = "" + created_at: int # unix timestamp seconds + metadata: Optional[Metadata] + + +class MessageReplaceStreamResponse(MessageStreamResponse): + pass + + +class AgentMessageStreamResponse(MessageStreamResponse): + pass + + +class AgentThoughtStreamResponse(StreamResponse): + id: str # agent thought id + message_id: str + conversation_id: str + position: int # thought position, start from 1 + thought: str + observation: str + tool: str + tool_input: str + message_files: List[str] = [] + created_at: int # unix timestamp seconds + + +class MessageFileStreamResponse(StreamResponse): + id: str # file id + conversation_id: str + type: str # only image + belongs_to: str # assistant + url: str + + +class WorkflowsStreamResponse(StreamResponse): + workflow_run_id: str + data: Optional[Union[ + WorkflowStartedData, + WorkflowFinishedData, + NodeStartedData, + NodeFinishedData] + ] + + +class ChatWorkflowsStreamResponse(WorkflowsStreamResponse): + message_id: str + conversation_id: str + created_at: int + + +_COMPLETION_EVENT_TO_STREAM_RESP_MAPPING = { + StreamEvent.PING: PingResponse, + StreamEvent.MESSAGE: MessageStreamResponse, + StreamEvent.MESSAGE_END: MessageEndStreamResponse, + StreamEvent.MESSAGE_REPLACE: MessageReplaceStreamResponse, +} + +CompletionStreamResponse = Union[ + PingResponse, + MessageStreamResponse, + MessageEndStreamResponse, + MessageReplaceStreamResponse, +] + + +def build_completion_stream_response(data: dict) -> CompletionStreamResponse: + event = StreamEvent.new(data.get(STREAM_EVENT_KEY)) + return _COMPLETION_EVENT_TO_STREAM_RESP_MAPPING.get(event, StreamResponse)(**data) + + +_CHAT_EVENT_TO_STREAM_RESP_MAPPING = { + StreamEvent.PING: PingResponse, + # chat + StreamEvent.MESSAGE: MessageStreamResponse, + StreamEvent.MESSAGE_END: MessageEndStreamResponse, + StreamEvent.MESSAGE_REPLACE: MessageReplaceStreamResponse, + StreamEvent.MESSAGE_FILE: MessageFileStreamResponse, + # agent + StreamEvent.AGENT_MESSAGE: AgentMessageStreamResponse, + StreamEvent.AGENT_THOUGHT: AgentThoughtStreamResponse, + # workflow + StreamEvent.WORKFLOW_STARTED: WorkflowsStreamResponse, + StreamEvent.NODE_STARTED: WorkflowsStreamResponse, + StreamEvent.NODE_FINISHED: WorkflowsStreamResponse, + StreamEvent.WORKFLOW_FINISHED: WorkflowsStreamResponse, +} + +ChatStreamResponse = Union[ + PingResponse, + MessageStreamResponse, + MessageEndStreamResponse, + MessageReplaceStreamResponse, + MessageFileStreamResponse, + AgentMessageStreamResponse, + AgentThoughtStreamResponse, + WorkflowsStreamResponse, +] + + +def build_chat_stream_response(data: dict) -> ChatStreamResponse: + event = StreamEvent.new(data.get(STREAM_EVENT_KEY)) + return _CHAT_EVENT_TO_STREAM_RESP_MAPPING.get(event, 
+
+
+_WORKFLOW_EVENT_TO_STREAM_RESP_MAPPING = {
+    StreamEvent.PING: PingResponse,
+    # workflow
+    StreamEvent.WORKFLOW_STARTED: WorkflowsStreamResponse,
+    StreamEvent.NODE_STARTED: WorkflowsStreamResponse,
+    StreamEvent.NODE_FINISHED: WorkflowsStreamResponse,
+    StreamEvent.WORKFLOW_FINISHED: WorkflowsStreamResponse,
+}
+
+WorkflowsRunStreamResponse = Union[
+    PingResponse,
+    WorkflowsStreamResponse,
+]
+
+
+def build_workflows_stream_response(data: dict) -> WorkflowsRunStreamResponse:
+    event = StreamEvent.new(data.get(STREAM_EVENT_KEY))
+    return _WORKFLOW_EVENT_TO_STREAM_RESP_MAPPING.get(event, StreamResponse)(**data)
diff --git a/dify_client_python/dify_client/models/workflow.py b/dify_client_python/dify_client/models/workflow.py
new file mode 100644
index 0000000000000000000000000000000000000000..52c3f9ace1f1ab6212bd79256f7ea18f8f77234a
--- /dev/null
+++ b/dify_client_python/dify_client/models/workflow.py
@@ -0,0 +1,91 @@
+try:
+    from enum import StrEnum
+except ImportError:
+    from strenum import StrEnum
+from typing import Dict, List, Optional
+
+from pydantic import BaseModel
+
+from dify_client_python.dify_client.models.base import ResponseMode, File
+
+
+class WorkflowStatus(StrEnum):
+    RUNNING = "running"
+    SUCCEEDED = "succeeded"
+    FAILED = "failed"
+    STOPPED = "stopped"
+
+
+class ExecutionMetadata(BaseModel):
+    total_tokens: Optional[int] = None
+    total_price: Optional[str] = None
+    currency: Optional[str] = None
+
+
+class WorkflowStartedData(BaseModel):
+    id: str  # workflow run id
+    workflow_id: str  # workflow id
+    sequence_number: int
+    inputs: Optional[dict] = None
+    created_at: int  # unix timestamp seconds
+
+
+class NodeStartedData(BaseModel):
+    id: str  # workflow run id
+    node_id: str
+    node_type: str
+    title: str
+    index: int
+    predecessor_node_id: Optional[str] = None
+    inputs: Optional[dict] = None
+    created_at: int
+    extras: dict = {}
+
+
+class NodeFinishedData(BaseModel):
+    id: str  # workflow run id
+    node_id: str
+    node_type: str
+    title: str
+    index: int
+    predecessor_node_id: Optional[str] = None
+    inputs: Optional[dict] = None
+    process_data: Optional[dict] = None
+    outputs: Optional[dict] = {}
+    status: WorkflowStatus
+    error: Optional[str] = None
+    elapsed_time: Optional[float] = None  # seconds
+    execution_metadata: Optional[ExecutionMetadata] = None
+    created_at: int
+    finished_at: int
+    files: List = []
+
+
+class WorkflowFinishedData(BaseModel):
+    id: str  # workflow run id
+    workflow_id: str  # workflow id
+    sequence_number: int
+    status: WorkflowStatus
+    outputs: Optional[dict] = None
+    error: Optional[str] = None
+    elapsed_time: Optional[float] = None
+    total_tokens: Optional[int] = None
+    total_steps: Optional[int] = 0
+    created_at: int
+    finished_at: int
+    created_by: dict = {}
+    files: List = []
+
+
+class WorkflowsRunRequest(BaseModel):
+    inputs: Dict = {}
+    response_mode: ResponseMode
+    user: str
+    conversation_id: Optional[str] = ""
+    files: List[File] = []
+
+
+class WorkflowsRunResponse(BaseModel):
+    log_id: str
+    task_id: str
+    data: WorkflowFinishedData
diff --git a/dify_client_python/dify_client/utils/__init__.py b/dify_client_python/dify_client/utils/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..c897d034346359c806e5c85c77fb2a385d34ec87
--- /dev/null
+++ b/dify_client_python/dify_client/utils/__init__.py
@@ -0,0 +1 @@
+from ._common import *
diff --git a/dify_client_python/dify_client/utils/_common.py b/dify_client_python/dify_client/utils/_common.py
new file mode 100644
index 0000000000000000000000000000000000000000..ee76f52817da8008fc3a8109514d81ff17f68da5
--- /dev/null
+++ b/dify_client_python/dify_client/utils/_common.py
@@ -0,0 +1,7 @@
+def str_to_enum(str_enum_class, str_value: str, ignore_not_found: bool = False, enum_default=None):
+    for key, member in str_enum_class.__members__.items():
+        if str_value == member.value:
+            return member
+    if ignore_not_found:
+        return enum_default
+    raise ValueError(f"Invalid enum value: {str_value}")
diff --git a/dify_client_python/dify_client_python.egg-info/PKG-INFO b/dify_client_python/dify_client_python.egg-info/PKG-INFO
new file mode 100644
index 0000000000000000000000000000000000000000..c2e1220202836a3b18483b5b1cd8d91a3d35df76
--- /dev/null
+++ b/dify_client_python/dify_client_python.egg-info/PKG-INFO
@@ -0,0 +1,175 @@
+Metadata-Version: 2.1
+Name: dify-client-python
+Version: 1.0.1
+Summary: A package for interacting with the Dify Service-API
+Home-page: https://github.com/haoyuhu/dify-client-python
+Author: haoyuhu
+Author-email: im@huhaoyu.com
+License: MIT
+Keywords: dify nlp ai language-processing
+Classifier: Programming Language :: Python :: 3
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: OS Independent
+Requires-Python: >=3.7
+Description-Content-Type: text/markdown
+License-File: LICENSE
+Requires-Dist: httpx
+Requires-Dist: httpx-sse
+Requires-Dist: pydantic
+Requires-Dist: StrEnum
+
+# dify-client-python
+
+Welcome to the `dify-client-python` repository! This Python package provides a convenient and powerful interface to
+interact with the Dify API, enabling developers to integrate a wide range of features into their applications with ease.
+
+## Main Features
+
+* **Synchronous and Asynchronous Support**: The client offers both synchronous and asynchronous methods, allowing for
+  flexible integration into various Python codebases and frameworks.
+* **Stream and Non-stream Support**: Seamlessly work with both streaming and non-streaming endpoints of the Dify API for
+  real-time and batch processing use cases.
+* **Comprehensive Endpoint Coverage**: Supporting completion, chat, workflows, feedback, file uploads, and more, the
+  client covers all available Dify API endpoints.
+
+## Installation
+
+Before using the `dify-client-python` client, you'll need to install it. You can easily install it using `pip`:
+
+```bash
+pip install dify-client-python
+```
+
+## Quick Start
+
+Here's a quick example of how you can use the Dify Client to send a chat message.
+
+```python
+import uuid
+from dify_client import Client, models
+
+# Initialize the client with your API key
+client = Client(
+    api_key="your-api-key",
+    api_base="http://localhost/v1",
+)
+user = str(uuid.uuid4())
+
+# Create a blocking chat request
+blocking_chat_req = models.ChatRequest(
+    query="Hi, dify-client-python!",
+    inputs={"city": "Beijing"},
+    user=user,
+    response_mode=models.ResponseMode.BLOCKING,
+)
+
+# Send the chat message
+chat_response = client.chat_messages(blocking_chat_req, timeout=60.)
+print(chat_response)
+
+# Create a streaming chat request
+streaming_chat_req = models.ChatRequest(
+    query="Hi, dify-client-python!",
+    inputs={"city": "Beijing"},
+    user=user,
+    response_mode=models.ResponseMode.STREAMING,
+)
+
+# Send the chat message and consume the stream
+for chunk in client.chat_messages(streaming_chat_req, timeout=60.):
+    print(chunk)
+```
+
+For asynchronous operations, use the `AsyncClient` in a similar fashion:
+
+```python
+import asyncio
+import uuid
+
+from dify_client import AsyncClient, models
+
+# Initialize the async client with your API key
+async_client = AsyncClient(
+    api_key="your-api-key",
+    api_base="http://localhost/v1",
+)
+
+
+# Define an asynchronous function to send a chat message with BLOCKING ResponseMode
+async def send_chat_message():
+    user = str(uuid.uuid4())
+    # Create a blocking chat request
+    blocking_chat_req = models.ChatRequest(
+        query="Hi, dify-client-python!",
+        inputs={"city": "Beijing"},
+        user=user,
+        response_mode=models.ResponseMode.BLOCKING,
+    )
+    chat_response = await async_client.achat_messages(blocking_chat_req, timeout=60.)
+    print(chat_response)
+
+
+# Define an asynchronous function to send a chat message with STREAMING ResponseMode
+async def send_chat_message_stream():
+    user = str(uuid.uuid4())
+    # Create a streaming chat request
+    streaming_chat_req = models.ChatRequest(
+        query="Hi, dify-client-python!",
+        inputs={"city": "Beijing"},
+        user=user,
+        response_mode=models.ResponseMode.STREAMING,
+    )
+    async for chunk in await async_client.achat_messages(streaming_chat_req, timeout=60.):
+        print(chunk)
+
+
+# Run both coroutines on a single event loop; a bare asyncio.gather() call
+# at module level never actually executes the coroutines.
+async def main():
+    await asyncio.gather(send_chat_message(), send_chat_message_stream())
+
+
+asyncio.run(main())
+```
+
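+For agent-style chat flows it helps to branch on the typed stream models from `dify_client/models/stream.py` (what
+`build_chat_stream_response` produces) instead of printing raw chunks. This is a minimal sketch, assuming the stream
+models such as `StreamEvent` are re-exported under `models` the same way `ChatRequest` is; adjust the imports if the
+actual layout differs:
+
+```python
+import uuid
+
+from dify_client import Client, models
+
+client = Client(api_key="your-api-key", api_base="http://localhost/v1")
+
+streaming_req = models.ChatRequest(
+    query="Hi, dify-client-python!",
+    inputs={},
+    user=str(uuid.uuid4()),
+    response_mode=models.ResponseMode.STREAMING,
+)
+
+for chunk in client.chat_messages(streaming_req, timeout=60.):
+    if chunk.event == models.StreamEvent.AGENT_THOUGHT:
+        # AgentThoughtStreamResponse: the tool call and its observation
+        print(f"[thought #{chunk.position}] tool={chunk.tool!r}")
+    elif chunk.event in (models.StreamEvent.MESSAGE, models.StreamEvent.AGENT_MESSAGE):
+        # Incremental answer tokens; concatenate them to rebuild the reply
+        print(chunk.answer, end="", flush=True)
+    elif chunk.event == models.StreamEvent.MESSAGE_END:
+        # The final event carries usage and citation metadata
+        print("\ndone:", chunk.metadata)
+```
+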
+## Documentation
+
+For detailed information on all the functionalities and how to use each endpoint, please refer to the official Dify API
+documentation. It provides comprehensive guidance on request and response structures, error handling, and other
+important details.
+
+## Contributing
+
+Contributions are welcome! If you would like to contribute to `dify-client-python`, please feel free to make a pull
+request or open an issue to discuss potential changes.
+
+## License
+
+This project is licensed under the MIT License - see the LICENSE file for details.
+
+```text
+MIT License
+
+Copyright (c) 2024 haoyuhu
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+```
+
+## Support
+
+If you encounter any issues or have questions regarding the usage of this client, please reach out to the Dify Client
+support team.
+
+Happy coding! 🚀
diff --git a/dify_client_python/dify_client_python.egg-info/SOURCES.txt b/dify_client_python/dify_client_python.egg-info/SOURCES.txt
new file mode 100644
index 0000000000000000000000000000000000000000..6de8ebd8292106d80d83754c5b55eea0361d87c1
--- /dev/null
+++ b/dify_client_python/dify_client_python.egg-info/SOURCES.txt
@@ -0,0 +1,22 @@
+LICENSE
+MANIFEST.in
+README.md
+setup.py
+dify_client/__init__.py
+dify_client/_clientx.py
+dify_client/errors.py
+dify_client/models/__init__.py
+dify_client/models/base.py
+dify_client/models/chat.py
+dify_client/models/completion.py
+dify_client/models/feedback.py
+dify_client/models/file.py
+dify_client/models/stream.py
+dify_client/models/workflow.py
+dify_client/utils/__init__.py
+dify_client/utils/_common.py
+dify_client_python.egg-info/PKG-INFO
+dify_client_python.egg-info/SOURCES.txt
+dify_client_python.egg-info/dependency_links.txt
+dify_client_python.egg-info/requires.txt
+dify_client_python.egg-info/top_level.txt
\ No newline at end of file
diff --git a/dify_client_python/dify_client_python.egg-info/dependency_links.txt b/dify_client_python/dify_client_python.egg-info/dependency_links.txt
new file mode 100644
index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc
--- /dev/null
+++ b/dify_client_python/dify_client_python.egg-info/dependency_links.txt
@@ -0,0 +1 @@
+
diff --git a/dify_client_python/dify_client_python.egg-info/requires.txt b/dify_client_python/dify_client_python.egg-info/requires.txt
new file mode 100644
index 0000000000000000000000000000000000000000..7a4f272789c56e0976571f5d22a73f707da2eb79
--- /dev/null
+++ b/dify_client_python/dify_client_python.egg-info/requires.txt
@@ -0,0 +1,4 @@
+httpx
+httpx-sse
+pydantic
+StrEnum
diff --git a/dify_client_python/dify_client_python.egg-info/top_level.txt b/dify_client_python/dify_client_python.egg-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..b5111f6c335b58caf90e006b7a56e6397c6b7aff
--- /dev/null
+++ b/dify_client_python/dify_client_python.egg-info/top_level.txt
@@ -0,0 +1 @@
+dify_client
diff --git a/dify_client_python/setup.py b/dify_client_python/setup.py
new file mode 100644
index 0000000000000000000000000000000000000000..cc3c66a98c5379ac1703b4078500afb22b0f092b
--- /dev/null
+++ b/dify_client_python/setup.py
@@ -0,0 +1,33 @@
+from setuptools import setup, find_packages
+
+with open("README.md", "r", encoding="utf-8") as fh:
+    long_description = fh.read()
+
+setup(
+    name="dify-client-python",
+    version="1.0.1",
+    author="haoyuhu",
+    author_email="im@huhaoyu.com",
+    description="A package for interacting with the Dify Service-API",
+    long_description=long_description,
+    long_description_content_type="text/markdown",
+    url="https://github.com/haoyuhu/dify-client-python",
+    license='MIT',
+    # find_packages() also picks up the models/ and utils/ subpackages;
+    # a bare packages=['dify_client'] would ship only the top-level package.
+    packages=find_packages(include=["dify_client", "dify_client.*"]),
+    classifiers=[
+        "Programming Language :: Python :: 3",
+        "License :: OSI Approved :: MIT License",
+        "Operating System :: OS Independent",
+    ],
+    python_requires=">=3.7",
+    install_requires=[
+        "httpx",
+        "httpx-sse",
+        "pydantic",
+        "StrEnum",
+    ],
+    keywords='dify nlp ai language-processing',
+    include_package_data=True,
+)
diff --git a/docker-compose.yml b/docker-compose.yml
new file mode 100644
index 0000000000000000000000000000000000000000..abddb337c7fdf3a92daf08c548fadd2e6303cecd
--- /dev/null
+++ b/docker-compose.yml
@@ -0,0 +1,26 @@
+version: '3.8'
+
+services:
+  agent-api:
+    build: .
+    ports:
+      # the image exposes 7860 (uvicorn), so map host ports onto it
+      - "8443:7860"
+      - "8444:7860"
+    environment:
+      - DIFY_API_KEY=${DIFY_API_KEY}
+    volumes:
+      - ./logs:/app/logs
+    restart: unless-stopped
+    networks:
+      - dify-network
+    healthcheck:
+      # python:3.9-slim ships no curl; probe with the stdlib instead
+      test: ["CMD", "python", "-c", "import urllib.request; urllib.request.urlopen('http://localhost:7860/health')"]
+      interval: 30s
+      timeout: 10s
+      retries: 3
+
+networks:
+  dify-network:
+    external: true
\ No newline at end of file
diff --git a/example_output.md b/example_output.md
new file mode 100644
index 0000000000000000000000000000000000000000..a30a66d957fd540ed1d8751bb9f8d6ba48b87348
--- /dev/null
+++ b/example_output.md
@@ -0,0 +1,576 @@
+(base) bdillon@YAM-GWLGVJQVCF ~ % curl -X POST http://localhost:8442/v1/agent \
+-H "Content-Type: application/json" \
+-d '{
+  "query": "Show me the number of elderly (over 70) fall injuries in Illinois",
+  "stream": true,
+  "inputs": {},
+  "files": [],
+  "user": "default_user",
+  "response_mode": "streaming"
+}'
+event: agent_thought
+data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'id': 'b5b6ba83-bb20-4b2a-8c26-3ba7e5e87c80', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'position': 1, 'thought': '', 'observation': '', 'tool': '', 'tool_input': '', 'message_files': [], 'created_at': 1732127965, 'tool_labels': {}}
+
+event: agent_message
+data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': 'I', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'}
+
+event: agent_message
+data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': "'ll help you retrieve the", 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'}
+
+event: agent_message
+data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' fall injury data for elderly individuals', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'}
+
+event: agent_message
+data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5',
'answer': ' in Illinois. I', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': "'ll use multiple tools to compile", 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' a comprehensive analysis.', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': '\n\nFirst, I', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': "'ll retrieve the census data for Illinois an", 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': 'd then cross-reference it with fall', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': '-related injury data:', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': '', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_thought +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'id': 'b5b6ba83-bb20-4b2a-8c26-3ba7e5e87c80', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'position': 1, 'thought': "I'll help you retrieve the fall injury data for elderly individuals in Illinois. I'll use multiple tools to compile a comprehensive analysis.\n\nFirst, I'll retrieve the census data for Illinois and then cross-reference it with fall-related injury data:", 'observation': '', 'tool': 'get_census_data', 'tool_input': '{"get_census_data": {"state_code": "17"}}', 'message_files': [], 'created_at': 1732127965, 'tool_labels': {'get_census_data': {'en_US': 'get_census_data', 'zh_Hans': 'get_census_data'}}} + +event: agent_thought +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'id': 'b5b6ba83-bb20-4b2a-8c26-3ba7e5e87c80', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'position': 1, 'thought': "I'll help you retrieve the fall injury data for elderly individuals in Illinois. 
I'll use multiple tools to compile a comprehensive analysis.\n\nFirst, I'll retrieve the census data for Illinois and then cross-reference it with fall-related injury data:", 'observation': '{"get_census_data": "{\\"llm_result\\": \\"**Bulleted List:**\\\\n\\\\n* Census Tract 9513: A high percentage (1.02%) of residents have ambulatory difficulty.\\\\n* Census Tract 9005: A high percentage (1.02%) of residents have ambulatory difficulty.\\\\n* Census Tract 101: A high percentage (0.51%) of residents have ambulatory difficulty.\\\\n* Census Tract 102: A high percentage (0.51%) of residents have ambulatory difficulty.\\\\n* Census Tract 4008: A high percentage (0.51%) of residents have ambulatory difficulty.\\\\n\\\\n\\\\n**Numbered Summary:**\\\\n\\\\n1. Census Tract 9513: 5515 residents with disabilities (1.02%), $50,734 median income.\\\\n2. Census Tract 9005: 6375 residents with disabilities (1.02%), $88,325 median income.\\\\n3. Census Tract 101: 4487 residents with disabilities (0.51%), $69,882 median income.\\\\n4. Census Tract 102: 4013 residents with disabilities (0.51%), $47,625 median income.\\\\n5. Census Tract 4008: 4783 residents with disabilities (0.51%), $71,574 median income.\\\\n\\\\n\\\\n**JSON Summary:**\\\\n\\\\n```json\\\\n[\\\\n {\\\\n \\\\\\"tract\\\\\\": \\\\\\"9513\\\\\\",\\\\n \\\\\\"total_population\\\\\\": 6931,\\\\n \\\\\\"disabilities\\\\\\": 6931,\\\\n \\\\\\"ambulatory_difficulty\\\\\\": 39,\\\\n \\\\\\"median_income\\\\\\": 50734,\\\\n \\\\\\"ambulatory_difficulty_percentage\\\\\\": 0.56\\\\n },\\\\n {\\\\n \\\\\\"tract\\\\\\": \\\\\\"9005\\\\\\",\\\\n \\\\\\"total_population\\\\\\": 6375,\\\\n \\\\\\"disabilities\\\\\\": 6375,\\\\n \\\\\\"ambulatory_difficulty\\\\\\": 82,\\\\n \\\\\\"median_income\\\\\\": 58828,\\\\n \\\\\\"ambulatory_difficulty_percentage\\\\\\": 1.28\\\\n },\\\\n {\\\\n \\\\\\"tract\\\\\\": \\\\\\"101\\\\\\",\\\\n \\\\\\"total_population\\\\\\": 4487,\\\\n \\\\\\"disabilities\\\\\\": 4487,\\\\n \\\\\\"ambulatory_difficulty\\\\\\": 52,\\\\n \\\\\\"median_income\\\\\\": 69882,\\\\n \\\\\\"ambulatory_difficulty_percentage\\\\\\": 1.16\\\\n },\\\\n {\\\\n \\\\\\"tract\\\\\\": \\\\\\"102\\\\\\",\\\\n \\\\\\"total_population\\\\\\": 4095,\\\\n \\\\\\"disabilities\\\\\\": 4013,\\\\n \\\\\\"ambulatory_difficulty\\\\\\": 0,\\\\n \\\\\\"median_income\\\\\\": 47625,\\\\n \\\\\\"ambulatory_difficulty_percentage\\\\\\": 0.00\\\\n },\\\\n {\\\\n \\\\\\"tract\\\\\\": \\\\\\"4008\\\\\\",\\\\n \\\\\\"total_population\\\\\\": 4783,\\\\n \\\\\\"disabilities\\\\\\": 4783,\\\\n \\\\\\"ambulatory_difficulty\\\\\\": 13,\\\\n \\\\\\"median_income\\\\\\": 71574,\\\\n \\\\\\"ambulatory_difficulty_percentage\\\\\\": 0.27\\\\n }\\\\n]\\\\n```\\\\n\\"}tool response: {\\"llm_result\\": \\"**Bulleted List:**\\\\n\\\\n* Census Tract 9513: A high percentage (1.02%) of residents have ambulatory difficulty.\\\\n* Census Tract 9005: A high percentage (1.02%) of residents have ambulatory difficulty.\\\\n* Census Tract 101: A high percentage (0.51%) of residents have ambulatory difficulty.\\\\n* Census Tract 102: A high percentage (0.51%) of residents have ambulatory difficulty.\\\\n* Census Tract 4008: A high percentage (0.51%) of residents have ambulatory difficulty.\\\\n\\\\n\\\\n**Numbered Summary:**\\\\n\\\\n1. Census Tract 9513: 5515 residents with disabilities (1.02%), $50,734 median income.\\\\n2. Census Tract 9005: 6375 residents with disabilities (1.02%), $88,325 median income.\\\\n3. 
Census Tract 101: 4487 residents with disabilities (0.51%), $69,882 median income.\\\\n4. Census Tract 102: 4013 residents with disabilities (0.51%), $47,625 median income.\\\\n5. Census Tract 4008: 4783 residents with disabilities (0.51%), $71,574 median income.\\\\n\\\\n\\\\n**JSON Summary:**\\\\n\\\\n```json\\\\n[\\\\n {\\\\n \\\\\\"tract\\\\\\": \\\\\\"9513\\\\\\",\\\\n \\\\\\"total_population\\\\\\": 6931,\\\\n \\\\\\"disabilities\\\\\\": 6931,\\\\n \\\\\\"ambulatory_difficulty\\\\\\": 39,\\\\n \\\\\\"median_income\\\\\\": 50734,\\\\n \\\\\\"ambulatory_difficulty_percentage\\\\\\": 0.56\\\\n },\\\\n {\\\\n \\\\\\"tract\\\\\\": \\\\\\"9005\\\\\\",\\\\n \\\\\\"total_population\\\\\\": 6375,\\\\n \\\\\\"disabilities\\\\\\": 6375,\\\\n \\\\\\"ambulatory_difficulty\\\\\\": 82,\\\\n \\\\\\"median_income\\\\\\": 58828,\\\\n \\\\\\"ambulatory_difficulty_percentage\\\\\\": 1.28\\\\n },\\\\n {\\\\n \\\\\\"tract\\\\\\": \\\\\\"101\\\\\\",\\\\n \\\\\\"total_population\\\\\\": 4487,\\\\n \\\\\\"disabilities\\\\\\": 4487,\\\\n \\\\\\"ambulatory_difficulty\\\\\\": 52,\\\\n \\\\\\"median_income\\\\\\": 69882,\\\\n \\\\\\"ambulatory_difficulty_percentage\\\\\\": 1.16\\\\n },\\\\n {\\\\n \\\\\\"tract\\\\\\": \\\\\\"102\\\\\\",\\\\n \\\\\\"total_population\\\\\\": 4095,\\\\n \\\\\\"disabilities\\\\\\": 4013,\\\\n \\\\\\"ambulatory_difficulty\\\\\\": 0,\\\\n \\\\\\"median_income\\\\\\": 47625,\\\\n \\\\\\"ambulatory_difficulty_percentage\\\\\\": 0.00\\\\n },\\\\n {\\\\n \\\\\\"tract\\\\\\": \\\\\\"4008\\\\\\",\\\\n \\\\\\"total_population\\\\\\": 4783,\\\\n \\\\\\"disabilities\\\\\\": 4783,\\\\n \\\\\\"ambulatory_difficulty\\\\\\": 13,\\\\n \\\\\\"median_income\\\\\\": 71574,\\\\n \\\\\\"ambulatory_difficulty_percentage\\\\\\": 0.27\\\\n }\\\\n]\\\\n```\\\\n\\"}."}', 'tool': 'get_census_data', 'tool_input': '{"get_census_data": {"state_code": "17"}}', 'message_files': [], 'created_at': 1732127965, 'tool_labels': {'get_census_data': {'en_US': 'get_census_data', 'zh_Hans': 'get_census_data'}}} + +event: agent_thought +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'id': '499a53fd-069c-416c-9cf7-77ea5c9ffda6', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'position': 2, 'thought': '', 'observation': '', 'tool': '', 'tool_input': '', 'message_files': [], 'created_at': 1732127965, 'tool_labels': {}} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': '', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_thought +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'id': '499a53fd-069c-416c-9cf7-77ea5c9ffda6', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'position': 2, 'thought': '', 'observation': '', 'tool': 'dataset_52d133ab_4165_4e0f_a9f7_7093a951cc25', 'tool_input': '{"dataset_52d133ab_4165_4e0f_a9f7_7093a951cc25": {"query": "Fall injuries for elderly over 70 in Illinois"}}', 'message_files': [], 'created_at': 1732127965, 'tool_labels': {'dataset_52d133ab_4165_4e0f_a9f7_7093a951cc25': {'en_US': 'dataset_52d133ab_4165_4e0f_a9f7_7093a951cc25', 'zh_Hans': 'dataset_52d133ab_4165_4e0f_a9f7_7093a951cc25'}}} + +event: agent_thought +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'id': '499a53fd-069c-416c-9cf7-77ea5c9ffda6', 'message_id': 
'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'position': 2, 'thought': '', 'observation': '{"dataset_52d133ab_4165_4e0f_a9f7_7093a951cc25": "Mechanism: Fall;Intent: Undetermined;Deaths: 89;Medical Costs Total: $1.13 M;Medical Costs Average: $12,719;Value of Statistical Life Total: $903.70 M;Value of Statistical Life Average: $10.15 M;Combined Costs Total: $904.83 M;Combined Costs Average: $10.17 M"}', 'tool': 'dataset_52d133ab_4165_4e0f_a9f7_7093a951cc25', 'tool_input': '{"dataset_52d133ab_4165_4e0f_a9f7_7093a951cc25": {"query": "Fall injuries for elderly over 70 in Illinois"}}', 'message_files': [], 'created_at': 1732127965, 'tool_labels': {'dataset_52d133ab_4165_4e0f_a9f7_7093a951cc25': {'en_US': 'dataset_52d133ab_4165_4e0f_a9f7_7093a951cc25', 'zh_Hans': 'dataset_52d133ab_4165_4e0f_a9f7_7093a951cc25'}}} + +: ping - 2024-11-20 18:39:39.453130 + +event: agent_thought +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'id': 'e309aeec-fa1b-425f-afba-8314f746c7fd', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'position': 3, 'thought': '', 'observation': '', 'tool': '', 'tool_input': '', 'message_files': [], 'created_at': 1732127965, 'tool_labels': {}} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': '\n\nLet', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' me provide a comprehensive', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' analysis of fall injuries for', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' the elderly in Illinois:', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': '\n\n### Fall Injuries for Elderly', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' (Over 70) in', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' Illinois\n\n#### Injury', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 
'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' Statistics\n- **Total Fall', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': '-Related Deaths**:', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' 89\n- **Medical', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' Costs**:\n - Total', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' Medical Expenses: $1.13', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' Million\n - Average Medical', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' Cost per Incident: $12,719', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': '\n- **Economic', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' Impact**:\n -', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' Total Combined Costs: $904', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': '.83 Million', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': '\n - Average Combined Cost', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: 
agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' per Incident: $', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': '10.17 Million', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': '\n\n#### Demographic', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' Insights\nFrom the census', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' data, we observe', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': 'd:\n1. Several', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' census tracts with', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' high ambulatory difficulty percent', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': 'ages:\n - Census', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' Tract 9005:', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' 1.28% ambulatory difficulty\n ', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': '- Census Tract ', 'created_at': 
1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': '101: 1.', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': '16% ambulatory difficulty\n - Census', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' Tract 9513: 0.', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': '56% ambulatory difficulty', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': '\n\n#### Key Observations', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': '\n1. The data', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' suggests a significant economic burden from fall-', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': 'related injuries among the elderly in', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' Illinois.\n2. 
Medical', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' costs and statistical life value', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' indicate the critical nature', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' of fall prevention.', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': '\n3. Some census tracts show higher', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' percentages of residents', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' with ambulatory difficulties, which', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' correlates with fall risk.', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': '', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_thought +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'id': 'e309aeec-fa1b-425f-afba-8314f746c7fd', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'position': 3, 'thought': '\n\nLet me provide a comprehensive analysis of fall injuries for the elderly in Illinois:\n\n### Fall Injuries for Elderly (Over 70) in Illinois\n\n#### Injury Statistics\n- **Total Fall-Related Deaths**: 89\n- **Medical Costs**:\n - Total Medical Expenses: $1.13 Million\n - Average Medical Cost per Incident: $12,719\n- **Economic Impact**:\n - Total Combined Costs: $904.83 Million\n - Average Combined Cost per Incident: $10.17 Million\n\n#### Demographic Insights\nFrom the census data, we observed:\n1. 
Several census tracts with high ambulatory difficulty percentages:\n - Census Tract 9005: 1.28% ambulatory difficulty\n - Census Tract 101: 1.16% ambulatory difficulty\n - Census Tract 9513: 0.56% ambulatory difficulty\n\n#### Key Observations\n1. The data suggests a significant economic burden from fall-related injuries among the elderly in Illinois.\n2. Medical costs and statistical life value indicate the critical nature of fall prevention.\n3. Some census tracts show higher percentages of residents with ambulatory difficulties, which correlates with fall risk.', 'observation': '', 'tool': 'heatmap_code', 'tool_input': '{"heatmap_code": {"input": "Illinois Fall Injury Hotspots: Elderly Population Ambulatory Difficulty"}}', 'message_files': [], 'created_at': 1732127965, 'tool_labels': {'heatmap_code': {'en_US': 'heatmap_code', 'zh_Hans': 'heatmap_code'}}} + +event: agent_thought +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'id': 'e309aeec-fa1b-425f-afba-8314f746c7fd', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'position': 3, 'thought': '\n\nLet me provide a comprehensive analysis of fall injuries for the elderly in Illinois:\n\n### Fall Injuries for Elderly (Over 70) in Illinois\n\n#### Injury Statistics\n- **Total Fall-Related Deaths**: 89\n- **Medical Costs**:\n - Total Medical Expenses: $1.13 Million\n - Average Medical Cost per Incident: $12,719\n- **Economic Impact**:\n - Total Combined Costs: $904.83 Million\n - Average Combined Cost per Incident: $10.17 Million\n\n#### Demographic Insights\nFrom the census data, we observed:\n1. Several census tracts with high ambulatory difficulty percentages:\n - Census Tract 9005: 1.28% ambulatory difficulty\n - Census Tract 101: 1.16% ambulatory difficulty\n - Census Tract 9513: 0.56% ambulatory difficulty\n\n#### Key Observations\n1. The data suggests a significant economic burden from fall-related injuries among the elderly in Illinois.\n2. Medical costs and statistical life value indicate the critical nature of fall prevention.\n3. Some census tracts show higher percentages of residents with ambulatory difficulties, which correlates with fall risk.', 'observation': '{"heatmap_code": "{\\"coordinates\\": \\"```json\\\\n[\\\\n [40.2976, -88.7912, 1.0],\\\\n [40.4406, -88.9869, 1.0],\\\\n [40.7413, -87.6184, 1.0],\\\\n [39.7456, -89.6183, 1.0],\\\\n [39.0419, -89.3995, 1.0]\\\\n]\\\\n```\\\\n\\", \\"code\\": \\"```javascript\\\\nvar map = L.map(\'map\').setView([40, -89], 8); // Centered roughly on Illinois\\\\n\\\\nvar tiles = L.tileLayer(\'https://tile.openstreetmap.org/{z}/{x}/{y}.png\', {\\\\n attribution: \'© OpenStreetMap contributors\',\\\\n}).addTo(map);\\\\n\\\\n// Sample data (replace with your actual data). 
Crucially, this needs intensity values.\\\\n// Higher intensity = more prominent on the heatmap.\\\\nvar heatPoints = [\\\\n [41.8781, -87.6298, 0.8], // Example point, replace with real data and intensities\\\\n [40.7128, -88.0016, 0.6], // Example point, replace with real data and intensities\\\\n [42.0149, -88.4559, 0.9], // Example point, replace with real data and intensities\\\\n [39.7420, -89.6787, 0.5] // Example point, replace with real data and intensities\\\\n];\\\\n\\\\n\\\\nvar heat = L.heatLayer(heatPoints, {\\\\n radius: 25,\\\\n blur: 15,\\\\n maxZoom: 18,\\\\n gradient: {\\\\n 0.4: \'blue\',\\\\n 0.65: \'lime\',\\\\n 1: \'red\'\\\\n }\\\\n}).addTo(map);\\\\n\\\\n\\\\n// Example of adding a marker for a point (optional)\\\\n// L.marker([41.8781, -87.6298]).addTo(map);\\\\n```\\\\n\\"}tool response: {\\"coordinates\\": \\"```json\\\\n[\\\\n [40.2976, -88.7912, 1.0],\\\\n [40.4406, -88.9869, 1.0],\\\\n [40.7413, -87.6184, 1.0],\\\\n [39.7456, -89.6183, 1.0],\\\\n [39.0419, -89.3995, 1.0]\\\\n]\\\\n```\\\\n\\", \\"code\\": \\"```javascript\\\\nvar map = L.map(\'map\').setView([40, -89], 8); // Centered roughly on Illinois\\\\n\\\\nvar tiles = L.tileLayer(\'https://tile.openstreetmap.org/{z}/{x}/{y}.png\', {\\\\n attribution: \'© OpenStreetMap contributors\',\\\\n}).addTo(map);\\\\n\\\\n// Sample data (replace with your actual data). Crucially, this needs intensity values.\\\\n// Higher intensity = more prominent on the heatmap.\\\\nvar heatPoints = [\\\\n [41.8781, -87.6298, 0.8], // Example point, replace with real data and intensities\\\\n [40.7128, -88.0016, 0.6], // Example point, replace with real data and intensities\\\\n [42.0149, -88.4559, 0.9], // Example point, replace with real data and intensities\\\\n [39.7420, -89.6787, 0.5] // Example point, replace with real data and intensities\\\\n];\\\\n\\\\n\\\\nvar heat = L.heatLayer(heatPoints, {\\\\n radius: 25,\\\\n blur: 15,\\\\n maxZoom: 18,\\\\n gradient: {\\\\n 0.4: \'blue\',\\\\n 0.65: \'lime\',\\\\n 1: \'red\'\\\\n }\\\\n}).addTo(map);\\\\n\\\\n\\\\n// Example of adding a marker for a point (optional)\\\\n// L.marker([41.8781, -87.6298]).addTo(map);\\\\n```\\\\n\\"}."}', 'tool': 'heatmap_code', 'tool_input': '{"heatmap_code": {"input": "Illinois Fall Injury Hotspots: Elderly Population Ambulatory Difficulty"}}', 'message_files': [], 'created_at': 1732127965, 'tool_labels': {'heatmap_code': {'en_US': 'heatmap_code', 'zh_Hans': 'heatmap_code'}}} + +event: agent_thought +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'id': '410a1128-2803-41ad-9017-4d6badc73155', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'position': 4, 'thought': '', 'observation': '', 'tool': '', 'tool_input': '', 'message_files': [], 'created_at': 1732127965, 'tool_labels': {}} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': '\n\n# Elderly Fall', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' Injuries in Illinois:', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 
'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' Comprehensive Analysis\n\n## Key', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' Findings for Fall Injuries (', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': 'Elderly Over 70', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ')\n\n###', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' Injury Statistics', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': '\n- **', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': 'Total Fall', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': '-Related Deaths', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': '**: 89', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': '\n- **Medical', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' Costs', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': '**: ', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 
'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': '\n - Total', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ':', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' $1.13', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' Million', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': '\n - Average', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' Cost', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' per', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' Incident', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ': $12,719', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': '\n- **Value', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' of Statistical', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' Life**', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': 
'3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ':\n - Total', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ': $903', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': '.', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': '70', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' Million\n -', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' Average:', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' $10.15', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' Million', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': '\n- **Combine', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': 'd Total', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' Costs**: $904', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': '.83 Million', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': '\n\n### 
Demographic', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' Insights', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': '\nBase', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': 'd on census', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' data for', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' Illinois, we', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' identifie', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': 'd several key census', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' tracts with high', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' ambulatory', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' difficulty:', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': '\n\n1', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': '. 
**', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': 'Census Tract 9', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': '005**\n ', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': '- Ambul', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': 'atory Difficulty: ', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': '1.28', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': '%\n - Total', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' Population: 6', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ',375', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': '\n - Median', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' Income: $88', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ',325', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +: ping - 2024-11-20 18:39:54.454461 + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': '\n\n2. 
**Census', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' Tract 9513', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': '**\n -', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' Ambulatory Difficulty:', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' 0.56', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': '%\n - Total', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' Population: 6,931', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': '\n - Median', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' Income: $50,734', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': '\n\n### Geographic', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' Hotspots\nI', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': "'ve generated a h", 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': 'eatmap 
highlighting fall', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' injury risk areas in Illinois. The', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' map shows concentration points base', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': 'd on:\n- Ambulatory difficulty', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' percentages\n- Population density\n- Geographic', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' coordinates\n\n#### Hotspot', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' Coordinates\n- Central Illinois: [', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': '40.2976, -88.7912', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ']\n- Northwestern Illinois: [40.', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': '4406, -88.9869]', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': '\n- Northeastern Illinois: [40.', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': '7413, -87.6184]', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: 
{'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': '\n- Southern Illinois: [39.', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': '7456, -89.6183]', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': '\n- Southwestern Illinois: [39.0419', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ', -89.3995]\n\n##', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' Recommendations\n1. Focus', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' fall prevention programs in high-risk census', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' tracts\n2. Develop', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' targeted interventions for areas', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' with high ambulatory difficulty', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': '\n3. 
Consider income', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': '-based support for medical', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' costs in vulnerable regions\n\n###', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' Visualization\n- A detaile', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': 'd heatmap has been generated showing fall injury', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' risk zones in Illinois\n- Color', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' gradient indicates intensity of risk (', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': 'blue to red)\n-', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' Radius and blur settings help visualize concentration', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' of risk\n\n## Limitations', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': '\n- Data represents reporte', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': 'd incidents\n- Some rural', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + 
+event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' or underreported areas might', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' not be fully captured\n- Actual', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' numbers may vary due to reporting', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' inconsistencies\n\nWoul', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': 'd you like me to dive', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' deeper into any specific aspect of these', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': ' fall injury statistics?', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_message +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'answer': '', 'created_at': 1732127965, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1'} + +event: agent_thought +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'id': '410a1128-2803-41ad-9017-4d6badc73155', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'position': 4, 'thought': "\n\n# Elderly Fall Injuries in Illinois: Comprehensive Analysis\n\n## Key Findings for Fall Injuries (Elderly Over 70)\n\n### Injury Statistics\n- **Total Fall-Related Deaths**: 89\n- **Medical Costs**: \n - Total: $1.13 Million\n - Average Cost per Incident: $12,719\n- **Value of Statistical Life**:\n - Total: $903.70 Million\n - Average: $10.15 Million\n- **Combined Total Costs**: $904.83 Million\n\n### Demographic Insights\nBased on census data for Illinois, we identified several key census tracts with high ambulatory difficulty:\n\n1. **Census Tract 9005**\n - Ambulatory Difficulty: 1.28%\n - Total Population: 6,375\n - Median Income: $88,325\n\n2. 
**Census Tract 9513**\n - Ambulatory Difficulty: 0.56%\n - Total Population: 6,931\n - Median Income: $50,734\n\n### Geographic Hotspots\nI've generated a heatmap highlighting fall injury risk areas in Illinois. The map shows concentration points based on:\n- Ambulatory difficulty percentages\n- Population density\n- Geographic coordinates\n\n#### Hotspot Coordinates\n- Central Illinois: [40.2976, -88.7912]\n- Northwestern Illinois: [40.4406, -88.9869]\n- Northeastern Illinois: [40.7413, -87.6184]\n- Southern Illinois: [39.7456, -89.6183]\n- Southwestern Illinois: [39.0419, -89.3995]\n\n## Recommendations\n1. Focus fall prevention programs in high-risk census tracts\n2. Develop targeted interventions for areas with high ambulatory difficulty\n3. Consider income-based support for medical costs in vulnerable regions\n\n### Visualization\n- A detailed heatmap has been generated showing fall injury risk zones in Illinois\n- Color gradient indicates intensity of risk (blue to red)\n- Radius and blur settings help visualize concentration of risk\n\n## Limitations\n- Data represents reported incidents\n- Some rural or underreported areas might not be fully captured\n- Actual numbers may vary due to reporting inconsistencies\n\nWould you like me to dive deeper into any specific aspect of these fall injury statistics?", 'observation': '', 'tool': '', 'tool_input': '', 'message_files': [], 'created_at': 1732127965, 'tool_labels': {}} + +event: message_end +data: {'task_id': 'a22e7690-db24-4552-aa10-4a67645f6a03', 'message_id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'conversation_id': '3e35bd4c-c11c-44b7-a5ee-b9d27ffb44b5', 'created_at': 1732127965, 'metadata': {'usage': {'prompt_tokens': 22994, 'completion_tokens': 534, 'total_tokens': 4189, 'prompt_unit_price': '1.00', 'prompt_price_unit': '0.000001', 'prompt_price': '0.0229940', 'completion_unit_price': '5.00', 'completion_price_unit': '0.000001', 'completion_price': '0.0026700', 'total_price': '0.0256640', 'currency': 'USD', 'latency': 3.0692982099717483}, 'retriever_resources': [{'position': 1, 'dataset_id': '52d133ab-4165-4e0f-a9f7-7093a951cc25', 'dataset_name': 'WISQARS_Fall_Related_Injuries', 'document_id': '7d6fc783-d111-4155-bfdc-ae8bb5969b4c', 'document_name': 'cost-data-export (3).csv', 'segment_id': '502ffbfa-13a0-4703-85ea-fb7953225fd1', 'score': 0.40905005712009535, 'content': 'Mechanism: Fall;Intent: Undetermined;Deaths: 89;Medical Costs Total: $1.13 M;Medical Costs Average: $12,719;Value of Statistical Life Total: $903.70 M;Value of Statistical Life Average: $10.15 M;Combined Costs Total: $904.83 M;Combined Costs Average: $10.17 M'}]}, 'id': 'f26b600a-9413-440f-bbfc-7d3a93c389c1', 'files': None} + +(base) bdillon@YAM-GWLGVJQVCF ~ % \ No newline at end of file diff --git a/hf_api.py b/hf_api.py new file mode 100644 index 0000000000000000000000000000000000000000..73a9a4aa9823765203b4532ee22ca64ea8c780e9 --- /dev/null +++ b/hf_api.py @@ -0,0 +1,352 @@ +from fastapi import FastAPI, HTTPException, Request +from fastapi.middleware.cors import CORSMiddleware +from typing import Dict, List, Optional, Union, Any +from pydantic import BaseModel, Field +from datetime import datetime +import logging +import json +import os +from dotenv import load_dotenv +from dify_client_python.dify_client import models +from sse_starlette.sse import EventSourceResponse +import httpx +from json_parser import SSEParser +from logger_config import setup_logger +from fastapi.responses import StreamingResponse +from fastapi.responses import JSONResponse +from 
response_formatter import ResponseFormatter +import traceback + +# Load environment variables +load_dotenv() + +# Configure logging +logging.basicConfig( + level=logging.INFO, + format='%(asctime)s - %(name)s - %(levelname)s - %(message)s' +) +logger = logging.getLogger(__name__) + +class AgentOutput(BaseModel): + """Structured output from agent processing""" + thought_content: str + observation: Optional[str] + tool_outputs: List[Dict] + citations: List[Dict] + metadata: Dict + raw_response: str + +class AgentRequest(BaseModel): + """Enhanced request model with additional parameters""" + query: str + conversation_id: Optional[str] = None + stream: bool = True + inputs: Dict = {} + files: List = [] + user: str = "default_user" + response_mode: str = "streaming" + +class AgentProcessor: + def __init__(self, api_key: str): + self.api_key = api_key + # Update API base to use environment variable with fallback + self.api_base = os.getenv( + "API_BASE_URL", + "https://rag-engine.go-yamamoto.com/v1" + ) + self.formatter = ResponseFormatter() + self.client = httpx.AsyncClient(timeout=60.0) + self.logger = setup_logger("agent_processor") + + + async def log_request_details( + self, + request: AgentRequest, + start_time: datetime + ) -> None: + """Log detailed request information""" + self.logger.debug( + "Request details: \n" + f"Query: {request.query}\n" + f"User: {request.user}\n" + f"Conversation ID: {request.conversation_id}\n" + f"Stream mode: {request.stream}\n" + f"Start time: {start_time}\n" + f"Inputs: {request.inputs}\n" + f"Files: {len(request.files)} files attached" + ) + + async def log_error( + self, + error: Exception, + context: Optional[Dict] = None + ) -> None: + """Log detailed error information""" + error_msg = ( + f"Error type: {type(error).__name__}\n" + f"Error message: {str(error)}\n" + f"Stack trace:\n{traceback.format_exc()}\n" + ) + if context: + error_msg += f"Context:\n{json.dumps(context, indent=2)}" + + self.logger.error(error_msg) + + async def cleanup(self): + """Cleanup method to properly close client""" + await self.client.aclose() + + async def process_stream(self, request: AgentRequest): + start_time = datetime.now() + await self.log_request_details(request, start_time) + + headers = { + "Authorization": f"Bearer {self.api_key}", + "Content-Type": "application/json", + "Accept": "text/event-stream" + } + + chat_request = { + "query": request.query, + "inputs": request.inputs, + "response_mode": "streaming" if request.stream else "blocking", + "user": request.user, + "conversation_id": request.conversation_id, + "files": request.files + } + + async def event_generator(): + parser = SSEParser() + citations = [] + metadata = {} + + try: + async with self.client.stream( + "POST", + f"{self.api_base}/chat-messages", + headers=headers, + json=chat_request + ) as response: + self.logger.debug( + f"Stream connection established\n" + f"Status: {response.status_code}\n" + f"Headers: {dict(response.headers)}" + ) + + buffer = "" + async for line in response.aiter_lines(): + if not line.strip(): + continue + + self.logger.debug(f"Raw SSE line: {line}") + + if "data:" in line: + try: + data = line.split("data:", 1)[1].strip() + parsed = json.loads(data) + + if parsed.get("event") == "message_end": + citations = parsed.get("retriever_resources", []) + metadata = parsed.get("metadata", {}) + self.logger.debug( + f"Message end event:\n" + f"Citations: {citations}\n" + f"Metadata: {metadata}" + ) + + formatted = self.format_terminal_output( + parsed, + citations=citations, + 
metadata=metadata
+                                )
+                                if formatted:
+                                    self.logger.info(formatted)
+                            except Exception as e:
+                                await self.log_error(
+                                    e,
+                                    {"line": line, "event": "parse_data"}
+                                )
+
+                        buffer += line + "\n"
+
+                        if line.startswith("data:") or buffer.strip().endswith("}"):
+                            try:
+                                processed_response = parser.parse_sse_event(buffer)
+                                if processed_response and isinstance(processed_response, dict):
+                                    cleaned_response = self.clean_response(processed_response)
+                                    if cleaned_response:
+                                        xml_content = cleaned_response.get("content", "")
+                                        yield f"data: {xml_content}\n\n"
+                            except Exception as parse_error:
+                                await self.log_error(
+                                    parse_error,
+                                    {"buffer": buffer, "event": "process_buffer"}
+                                )
+                                error_xml = (
+                                    f"<error>"
+                                    f"{str(parse_error)}"
+                                    f"</error>"
+                                )
+                                yield f"data: {error_xml}\n\n"
+                            finally:
+                                buffer = ""
+
+            except httpx.ConnectError as e:
+                await self.log_error(e, {"event": "connection_error"})
+                error_xml = (
+                    f"<error>"
+                    f"Connection error: {str(e)}"
+                    f"</error>"
+                )
+                yield f"data: {error_xml}\n\n"
+            except Exception as e:
+                await self.log_error(e, {"event": "stream_error"})
+                error_xml = (
+                    f"<error>"
+                    f"Streaming error: {str(e)}"
+                    f"</error>"
+                )
+                yield f"data: {error_xml}\n\n"
+            finally:
+                end_time = datetime.now()
+                duration = (end_time - start_time).total_seconds()
+                self.logger.info(f"Request completed in {duration:.2f} seconds")
+
+        return StreamingResponse(
+            event_generator(),
+            media_type="text/event-stream",
+            headers={
+                "Cache-Control": "no-cache",
+                "Connection": "keep-alive",
+                "X-Accel-Buffering": "no",
+                "Access-Control-Allow-Origin": "*"
+            }
+        )
+
+    def format_terminal_output(
+        self,
+        response: Dict,
+        citations: List[Dict] = None,
+        metadata: Dict = None
+    ) -> Optional[str]:
+        """Format response for terminal output"""
+        event_type = response.get("event")
+
+        if event_type == "agent_thought":
+            thought = response.get("thought", "")
+            observation = response.get("observation", "")
+            terminal_output, _ = self.formatter.format_thought(
+                thought,
+                observation,
+                citations=citations,
+                metadata=metadata
+            )
+            return terminal_output
+
+        elif event_type == "agent_message":
+            message = response.get("answer", "")
+            terminal_output, _ = self.formatter.format_message(message)
+            return terminal_output
+
+        elif event_type == "error":
+            error = response.get("error", "Unknown error")
+            terminal_output, _ = self.formatter.format_error(error)
+            return terminal_output
+
+        return None
+
+    def clean_response(self, response: Dict) -> Optional[Dict]:
+        """Clean and transform the response for frontend consumption"""
+        try:
+            event_type = response.get("event")
+            if not event_type:
+                return None
+
+            # Handle different event types
+            if event_type == "agent_thought":
+                thought = response.get("thought", "")
+                observation = response.get("observation", "")
+                _, xml_output = self.formatter.format_thought(thought, observation)
+                return {
+                    "type": "thought",
+                    "content": xml_output
+                }
+
+            elif event_type == "agent_message":
+                message = response.get("answer", "")
+                _, xml_output = self.formatter.format_message(message)
+                return {
+                    "type": "message",
+                    "content": xml_output
+                }
+
+            elif event_type == "error":
+                error = response.get("error", "Unknown error")
+                _, xml_output = self.formatter.format_error(error)
+                return {
+                    "type": "error",
+                    "content": xml_output
+                }
+
+            return None
+        except Exception as e:
+            logger.error(f"Error cleaning response: {str(e)}")
+            return None
+
+# Initialize FastAPI app
+app = FastAPI()
+agent_processor = None
+
+# Add CORS middleware
+app.add_middleware(
+    CORSMiddleware,
+    allow_origins=["*"],
+    allow_credentials=True,
+    allow_methods=["*"],
+    allow_headers=["*"],
+)
+
+@app.on_event("startup")
+async def startup_event():
+    global agent_processor
+    api_key = os.getenv("DIFY_API_KEY", "app-kVHTrZzEmFXEBfyXOi4rro7M")
+    agent_processor = AgentProcessor(api_key=api_key)
+
+@app.on_event("shutdown")
+async def shutdown_event():
+    global agent_processor
+    if agent_processor:
+        await agent_processor.cleanup()
+
+@app.post("/v1/agent")
+async def process_agent_request(request: AgentRequest):
+    try:
+        logger.info(f"Processing agent request: {request.query}")
+        return await agent_processor.process_stream(request)
+
+    except Exception as e:
+        logger.error(f"Error in agent request processing: {e}", exc_info=True)
+        raise HTTPException(status_code=500, detail=str(e))
+
+@app.middleware("http")
+async def error_handling_middleware(request: Request, call_next):
+    try:
+        response = await call_next(request)
+        return response
+    except Exception as e:
+        logger.error(f"Unhandled error: {str(e)}", exc_info=True)
+        return JSONResponse(
+            status_code=500,
+            content={"error": "Internal server error occurred"}
+        )
+
+# Add host and port parameters to the launch
+if __name__ == "__main__":
+    import uvicorn
+    port = int(os.getenv("PORT", 7860))
+    uvicorn.run(
+        "hf_api:app",  # this module is hf_api, not api
+        host="0.0.0.0",
+        port=port,
+        reload=True
+    )
\ No newline at end of file
diff --git a/json_parser.py b/json_parser.py
new file mode 100644
index 0000000000000000000000000000000000000000..014ff0e3dd3cf0a4562ae92e36bb3b1a1333c980
--- /dev/null
+++ b/json_parser.py
@@ -0,0 +1,105 @@
+from logger_config import setup_logger
+from typing import Dict, Any, Optional, List, Union
+from dataclasses import dataclass, asdict
+from enum import Enum
+import json
+from dify_client_python.dify_client.models.stream import (
+    StreamEvent,
+    StreamResponse,
+    build_chat_stream_response
+)
+import re
+
+logger = setup_logger()
+
+class EventType(Enum):
+    AGENT_THOUGHT = "agent_thought"
+    AGENT_MESSAGE = "agent_message"
+    MESSAGE_END = "message_end"
+    PING = "ping"
+
+@dataclass
+class ToolCall:
+    tool_name: str
+    tool_input: Dict[str, Any]
+    tool_output: Optional[str]
+    tool_labels: Dict[str, Dict[str, str]]
+
+@dataclass
+class Citation:
+    dataset_id: str
+    dataset_name: str
+    document_id: str
+    document_name: str
+    segment_id: str
+    score: float
+    content: str
+
+@dataclass
+class ProcessedResponse:
+    event_type: EventType
+    task_id: str
+    message_id: str
+    conversation_id: str
+    content: str
+    tool_calls: List[ToolCall]
+    citations: List[Citation]
+    metadata: Dict[str, Any]
+    created_at: int
+
+class EnumEncoder(json.JSONEncoder):
+    def default(self, obj):
+        if isinstance(obj, Enum):
+            return obj.value
+        if hasattr(obj, 'dict'):
+            return obj.dict()
+        return super().default(obj)
+
+class SSEParser:
+    def __init__(self):
+        self.logger = setup_logger("sse_parser")
+
+    def parse_sse_event(self, data: str) -> Optional[Dict]:
+        """Parse SSE event data and return cleaned dictionary"""
+        self.logger.debug("Parsing SSE event")
+
+        try:
+            # Extract the data portion
+            if "data:" in data:
+                data = data.split("data:", 1)[1].strip()
+
+            # Parse JSON data
+            parsed_data = json.loads(data)
+
+            # Clean tool outputs if present
+            if "observation" in parsed_data:
+                try:
+                    observation = parsed_data["observation"]
+                    if observation and isinstance(observation, str):
+                        tool_data = json.loads(observation)
+                        # Extract relevant tool output
+                        for key, value in tool_data.items():
+                            if isinstance(value, str) and "llm_result" in value:
+                                tool_result = json.loads(value)["llm_result"]
+ parsed_data["observation"] = self.clean_tool_output(tool_result) + except: + pass # Keep original observation if parsing fails + + return parsed_data + + except json.JSONDecodeError as e: + self.logger.error(f"JSON decode error: {str(e)}") + return None + except Exception as e: + self.logger.error(f"Parse error: {str(e)}") + return None + + def clean_tool_output(self, output: str) -> str: + """Clean tool output by removing markdown and other formatting""" + # Remove markdown code blocks + output = re.sub(r'```.*?```', '', output, flags=re.DOTALL) + # Remove other markdown formatting + output = re.sub(r'[*_`#]', '', output) + # Clean up whitespace + output = re.sub(r'\n{3,}', '\n\n', output.strip()) + return output \ No newline at end of file diff --git a/logger_config.py b/logger_config.py new file mode 100644 index 0000000000000000000000000000000000000000..b33246a11a87d263ea69163229a2c03dc65060c1 --- /dev/null +++ b/logger_config.py @@ -0,0 +1,102 @@ +import logging +import sys +import os +from datetime import datetime +from typing import Optional + +class CustomFormatter(logging.Formatter): + """Custom formatter with color coding and detailed formatting""" + + grey = "\x1b[38;21m" + blue = "\x1b[38;5;39m" + yellow = "\x1b[38;5;226m" + red = "\x1b[38;5;196m" + bold_red = "\x1b[31;1m" + reset = "\x1b[0m" + + def __init__(self, include_path: bool = False): + date_fmt = "%Y-%m-%d %H:%M:%S.%f" + if include_path: + fmt = ("%(asctime)s.%(msecs)03d - %(name)s - %(levelname)s " + "[%(pathname)s:%(lineno)d] - %(message)s") + else: + fmt = "%(asctime)s.%(msecs)03d - %(name)s - %(levelname)s - %(message)s" + + super().__init__(fmt=fmt, datefmt=date_fmt) + + def format(self, record): + # Save original format + format_orig = self._style._fmt + + # Apply color based on log level + if record.levelno == logging.DEBUG: + self._style._fmt = self.grey + format_orig + self.reset + elif record.levelno == logging.INFO: + self._style._fmt = self.blue + format_orig + self.reset + elif record.levelno == logging.WARNING: + self._style._fmt = self.yellow + format_orig + self.reset + elif record.levelno == logging.ERROR: + self._style._fmt = self.red + format_orig + self.reset + elif record.levelno == logging.CRITICAL: + self._style._fmt = self.bold_red + format_orig + self.reset + + # Call original format + result = logging.Formatter.format(self, record) + + # Restore original format + self._style._fmt = format_orig + + return result + +def setup_logger( + name: str = __name__, + log_level: int = logging.DEBUG, + log_file: Optional[str] = None +) -> logging.Logger: + """ + Sets up a configured logger with both console and file handlers. 
+ + Args: + name: Logger name + log_level: Logging level + log_file: Optional specific log file path + + Returns: + Configured logger instance + """ + logger = logging.getLogger(name) + + # Only add handlers if logger doesn't have any + if not logger.handlers: + logger.setLevel(log_level) + + # Ensure logs directory exists + os.makedirs('logs', exist_ok=True) + + # Console handler with minimal formatting + console_handler = logging.StreamHandler(sys.stdout) + console_handler.setLevel(logging.INFO) + console_formatter = CustomFormatter(include_path=False) + console_handler.setFormatter(console_formatter) + + # File handler with detailed formatting + if not log_file: + log_file = f'logs/sse_stream_{datetime.now():%Y%m%d}.log' + + file_handler = logging.FileHandler(log_file) + file_handler.setLevel(logging.DEBUG) + file_formatter = CustomFormatter(include_path=True) + file_handler.setFormatter(file_formatter) + + # Debug file handler for trace-level logging + debug_file = f'logs/debug_{datetime.now():%Y%m%d}.log' + debug_handler = logging.FileHandler(debug_file) + debug_handler.setLevel(logging.DEBUG) + debug_formatter = CustomFormatter(include_path=True) + debug_handler.setFormatter(debug_formatter) + + logger.addHandler(console_handler) + logger.addHandler(file_handler) + logger.addHandler(debug_handler) + + return logger \ No newline at end of file diff --git a/logs/debug_20241120.log b/logs/debug_20241120.log new file mode 100644 index 0000000000000000000000000000000000000000..0ac337641612326cea6a953c79601fb0d2a7a08d --- /dev/null +++ b/logs/debug_20241120.log @@ -0,0 +1,19 @@ +2024-11-20 20:03:02.f.435 - sse_parser - DEBUG [/Users/anima/Downloads/cc-api-main/json_parser.py:64] - Parsing SSE event +2024-11-20 20:03:02.f.436 - sse_parser - DEBUG [/Users/anima/Downloads/cc-api-main/json_parser.py:64] - Parsing SSE event +2024-11-20 20:03:02.f.541 - sse_parser - DEBUG [/Users/anima/Downloads/cc-api-main/json_parser.py:64] - Parsing SSE event +2024-11-20 20:03:02.f.721 - sse_parser - DEBUG [/Users/anima/Downloads/cc-api-main/json_parser.py:64] - Parsing SSE event +2024-11-20 20:03:02.f.826 - sse_parser - DEBUG [/Users/anima/Downloads/cc-api-main/json_parser.py:64] - Parsing SSE event +2024-11-20 20:03:02.f.986 - sse_parser - DEBUG [/Users/anima/Downloads/cc-api-main/json_parser.py:64] - Parsing SSE event +2024-11-20 20:03:03.f.071 - sse_parser - DEBUG [/Users/anima/Downloads/cc-api-main/json_parser.py:64] - Parsing SSE event +2024-11-20 20:03:03.f.811 - sse_parser - DEBUG [/Users/anima/Downloads/cc-api-main/json_parser.py:64] - Parsing SSE event +2024-11-20 20:03:03.f.813 - sse_parser - DEBUG [/Users/anima/Downloads/cc-api-main/json_parser.py:64] - Parsing SSE event +2024-11-20 20:03:13.f.680 - sse_parser - DEBUG [/Users/anima/Downloads/cc-api-main/json_parser.py:64] - Parsing SSE event +2024-11-20 20:03:34.f.665 - sse_parser - DEBUG [/Users/anima/Downloads/cc-api-main/json_parser.py:64] - Parsing SSE event +2024-11-20 20:03:34.f.666 - sse_parser - DEBUG [/Users/anima/Downloads/cc-api-main/json_parser.py:64] - Parsing SSE event +2024-11-20 20:03:34.f.809 - sse_parser - DEBUG [/Users/anima/Downloads/cc-api-main/json_parser.py:64] - Parsing SSE event +2024-11-20 20:03:35.f.007 - sse_parser - DEBUG [/Users/anima/Downloads/cc-api-main/json_parser.py:64] - Parsing SSE event +2024-11-20 20:03:36.f.302 - sse_parser - DEBUG [/Users/anima/Downloads/cc-api-main/json_parser.py:64] - Parsing SSE event +2024-11-20 20:03:36.f.369 - sse_parser - DEBUG [/Users/anima/Downloads/cc-api-main/json_parser.py:64] - 
Parsing SSE event +2024-11-20 20:03:36.f.395 - sse_parser - DEBUG [/Users/anima/Downloads/cc-api-main/json_parser.py:64] - Parsing SSE event +2024-11-20 20:04:01.f.681 - sse_parser - DEBUG [/Users/anima/Downloads/cc-api-main/json_parser.py:64] - Parsing SSE event +2024-11-20 20:09:09.f.948 - sse_parser - DEBUG [/Users/anima/Downloads/cc-api-main/json_parser.py:64] - Parsing SSE event diff --git a/logs/sse_stream_20241120.log b/logs/sse_stream_20241120.log new file mode 100644 index 0000000000000000000000000000000000000000..0ac337641612326cea6a953c79601fb0d2a7a08d --- /dev/null +++ b/logs/sse_stream_20241120.log @@ -0,0 +1,19 @@ +2024-11-20 20:03:02.f.435 - sse_parser - DEBUG [/Users/anima/Downloads/cc-api-main/json_parser.py:64] - Parsing SSE event +2024-11-20 20:03:02.f.436 - sse_parser - DEBUG [/Users/anima/Downloads/cc-api-main/json_parser.py:64] - Parsing SSE event +2024-11-20 20:03:02.f.541 - sse_parser - DEBUG [/Users/anima/Downloads/cc-api-main/json_parser.py:64] - Parsing SSE event +2024-11-20 20:03:02.f.721 - sse_parser - DEBUG [/Users/anima/Downloads/cc-api-main/json_parser.py:64] - Parsing SSE event +2024-11-20 20:03:02.f.826 - sse_parser - DEBUG [/Users/anima/Downloads/cc-api-main/json_parser.py:64] - Parsing SSE event +2024-11-20 20:03:02.f.986 - sse_parser - DEBUG [/Users/anima/Downloads/cc-api-main/json_parser.py:64] - Parsing SSE event +2024-11-20 20:03:03.f.071 - sse_parser - DEBUG [/Users/anima/Downloads/cc-api-main/json_parser.py:64] - Parsing SSE event +2024-11-20 20:03:03.f.811 - sse_parser - DEBUG [/Users/anima/Downloads/cc-api-main/json_parser.py:64] - Parsing SSE event +2024-11-20 20:03:03.f.813 - sse_parser - DEBUG [/Users/anima/Downloads/cc-api-main/json_parser.py:64] - Parsing SSE event +2024-11-20 20:03:13.f.680 - sse_parser - DEBUG [/Users/anima/Downloads/cc-api-main/json_parser.py:64] - Parsing SSE event +2024-11-20 20:03:34.f.665 - sse_parser - DEBUG [/Users/anima/Downloads/cc-api-main/json_parser.py:64] - Parsing SSE event +2024-11-20 20:03:34.f.666 - sse_parser - DEBUG [/Users/anima/Downloads/cc-api-main/json_parser.py:64] - Parsing SSE event +2024-11-20 20:03:34.f.809 - sse_parser - DEBUG [/Users/anima/Downloads/cc-api-main/json_parser.py:64] - Parsing SSE event +2024-11-20 20:03:35.f.007 - sse_parser - DEBUG [/Users/anima/Downloads/cc-api-main/json_parser.py:64] - Parsing SSE event +2024-11-20 20:03:36.f.302 - sse_parser - DEBUG [/Users/anima/Downloads/cc-api-main/json_parser.py:64] - Parsing SSE event +2024-11-20 20:03:36.f.369 - sse_parser - DEBUG [/Users/anima/Downloads/cc-api-main/json_parser.py:64] - Parsing SSE event +2024-11-20 20:03:36.f.395 - sse_parser - DEBUG [/Users/anima/Downloads/cc-api-main/json_parser.py:64] - Parsing SSE event +2024-11-20 20:04:01.f.681 - sse_parser - DEBUG [/Users/anima/Downloads/cc-api-main/json_parser.py:64] - Parsing SSE event +2024-11-20 20:09:09.f.948 - sse_parser - DEBUG [/Users/anima/Downloads/cc-api-main/json_parser.py:64] - Parsing SSE event diff --git a/middleware.py b/middleware.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/nginx.conf b/nginx.conf new file mode 100644 index 0000000000000000000000000000000000000000..9978d975a7dd2c08e20dafbd28657d0f2fe31025 --- /dev/null +++ b/nginx.conf @@ -0,0 +1,32 @@ +server { + listen 8443; + server_name localhost; + + # Proxy headers + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header 
X-Forwarded-Proto $scheme; + + # API endpoint + location /api/ { + proxy_pass http://localhost:8444/; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection 'upgrade'; + proxy_cache_bypass $http_upgrade; + } + + # Health check endpoint + location /health { + access_log off; + return 200 'OK'; + add_header Content-Type text/plain; + } + + # Error handling + error_page 500 502 503 504 /50x.html; + location = /50x.html { + root /usr/share/nginx/html; + } +} \ No newline at end of file diff --git a/readme.md b/readme.md new file mode 100644 index 0000000000000000000000000000000000000000..aedf135d0212cb0410e2fa63290d6e1d9a1a6f10 --- /dev/null +++ b/readme.md @@ -0,0 +1,21 @@ +conda activate cc-api + +pip install -r requirements.txt + +uvicorn api:app --reload --port 8224 + +cloudflared tunnel --url localhost:8224 + +pip install -e . + + +curl -X POST https://advertisement-recent-play-hits.trycloudflare.com/v1/agent \ +-H "Content-Type: application/json" \ +-d '{ + "query": "Show me the number of elderly (over 70) fall injuries in Illinois", + "stream": true, + "inputs": {}, + "files": [], + "user": "default_user", + "response_mode": "streaming" +}' \ No newline at end of file diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..ef86abb8bfd567b011302c13ac037c24409c9a5d --- /dev/null +++ b/requirements.txt @@ -0,0 +1,14 @@ +sseclient-py +fastapi +uvicorn +python-dotenv +requests +gradio +pydantic +pydantic-settings +pydantic-core +pydantic-extra-types +sse-starlette +httpx +httpx-sse +dify-client-python \ No newline at end of file diff --git a/response_formatter.py b/response_formatter.py new file mode 100644 index 0000000000000000000000000000000000000000..ae930968423343e3eaa69f840765a2dafccca48d --- /dev/null +++ b/response_formatter.py @@ -0,0 +1,195 @@ +from typing import Dict, Optional, Tuple, List, Any +import re +import xml.etree.ElementTree as ET +from datetime import datetime +import json + +class ToolType: + DUCKDUCKGO = "duckduckgo_search" + REDDIT_NEWS = "reddit_x_gnews_newswire_crunchbase" + PUBMED = "pubmed_search" + CENSUS = "get_census_data" + HEATMAP = "heatmap_code" + MERMAID = "mermaid_diagram" + WISQARS = "wisqars" + WONDER = "wonder" + NCHS = "nchs" + ONESTEP = "onestep" + DQS = "dqs_nhis_adult_summary_health_statistics" + +class ResponseFormatter: + @staticmethod + def format_thought( + thought: str, + observation: Optional[str] = None, + citations: List[Dict] = None, + metadata: Dict = None + ) -> Tuple[str, str]: + """Format agent thought and observation for both terminal and XML output""" + # Terminal format + terminal_output = thought.strip() + if observation: + cleaned_obs = ResponseFormatter._clean_markdown(observation) + if cleaned_obs: + terminal_output += f"\n\nObservation:\n{cleaned_obs}" + + # XML format + root = ET.Element("agent_response") + thought_elem = ET.SubElement(root, "thought") + thought_elem.text = thought.strip() + + if observation: + obs_elem = ET.SubElement(root, "observation") + + # Extract and format tool outputs + tool_outputs = ResponseFormatter._extract_tool_outputs(observation) + if tool_outputs: + tools_elem = ET.SubElement(obs_elem, "tools") + for tool_name, tool_data in tool_outputs.items(): + tool_elem = ResponseFormatter._create_tool_element(tools_elem, tool_name, tool_data) + + # Add citations if available + if citations: + citations_elem = ET.SubElement(root, "citations") + for citation in citations: + cite_elem = 
ET.SubElement(citations_elem, "citation")
+                for key, value in citation.items():
+                    cite_detail = ET.SubElement(cite_elem, key)
+                    cite_detail.text = str(value)
+
+        # Add metadata if available
+        if metadata:
+            metadata_elem = ET.SubElement(root, "metadata")
+            for key, value in metadata.items():
+                meta_detail = ET.SubElement(metadata_elem, key)
+                meta_detail.text = str(value)
+
+        xml_output = ET.tostring(root, encoding='unicode')
+        return terminal_output, xml_output
+
+    @staticmethod
+    def _create_tool_element(parent: ET.Element, tool_name: str, tool_data: Dict) -> ET.Element:
+        """Create XML element for specific tool type with appropriate structure"""
+        tool_elem = ET.SubElement(parent, "tool")
+        tool_elem.set("name", tool_name)
+
+        # Handle different tool types
+        if tool_name == ToolType.CENSUS:
+            ResponseFormatter._format_census_data(tool_elem, tool_data)
+        elif tool_name == ToolType.MERMAID:
+            ResponseFormatter._format_mermaid_data(tool_elem, tool_data)
+        elif tool_name in [ToolType.WISQARS, ToolType.WONDER, ToolType.NCHS]:
+            ResponseFormatter._format_health_data(tool_elem, tool_data)
+        else:
+            # Generic tool output format
+            content_elem = ET.SubElement(tool_elem, "content")
+            content_elem.text = ResponseFormatter._clean_markdown(str(tool_data))
+
+        return tool_elem
+
+    @staticmethod
+    def _format_census_data(tool_elem: ET.Element, data: Dict) -> None:
+        """Format census data with specific structure"""
+        try:
+            # Extract census tract data
+            tracts_elem = ET.SubElement(tool_elem, "census_tracts")
+
+            # Parse the llm_result to extract structured data
+            if "llm_result" in data:
+                result = json.loads(data["llm_result"])
+                for tract_data in result.get("tracts", []):
+                    tract_elem = ET.SubElement(tracts_elem, "tract")
+                    tract_elem.set("id", str(tract_data.get("tract", "")))
+
+                    # Add tract details
+                    for key, value in tract_data.items():
+                        if key != "tract":
+                            detail_elem = ET.SubElement(tract_elem, key.replace("_", ""))
+                            detail_elem.text = str(value)
+        except Exception:
+            # Fallback to simple format if parsing fails
+            content_elem = ET.SubElement(tool_elem, "content")
+            content_elem.text = ResponseFormatter._clean_markdown(str(data))
+
+    @staticmethod
+    def _format_mermaid_data(tool_elem: ET.Element, data: Dict) -> None:
+        """Format mermaid diagram data"""
+        try:
+            diagram_elem = ET.SubElement(tool_elem, "diagram")
+            if "mermaid_diagram" in data:
+                # Clean the mermaid code
+                mermaid_code = re.sub(r'```mermaid\s*|\s*```', '', data["mermaid_diagram"])
+                diagram_elem.text = mermaid_code.strip()
+        except Exception:
+            content_elem = ET.SubElement(tool_elem, "content")
+            content_elem.text = ResponseFormatter._clean_markdown(str(data))
+
+    @staticmethod
+    def _format_health_data(tool_elem: ET.Element, data: Dict) -> None:
+        """Format health-related data from WISQARS, WONDER, etc."""
+        try:
+            if isinstance(data, dict):
+                for key, value in data.items():
+                    category_elem = ET.SubElement(tool_elem, key.replace("_", ""))
+                    if isinstance(value, dict):
+                        for sub_key, sub_value in value.items():
+                            sub_elem = ET.SubElement(category_elem, sub_key.replace("_", ""))
+                            sub_elem.text = str(sub_value)
+                    else:
+                        category_elem.text = str(value)
+        except Exception:
+            content_elem = ET.SubElement(tool_elem, "content")
+            content_elem.text = ResponseFormatter._clean_markdown(str(data))
+
+    @staticmethod
+    def _extract_tool_outputs(observation: str) -> Dict[str, Any]:
+        """Extract and clean tool outputs from observation"""
+        tool_outputs = {}
+        try:
+            if isinstance(observation, str):
+                data = json.loads(observation)
+                for key, value in data.items():
+                    if isinstance(value, str) and "llm_result" in value:
+    @staticmethod
+    def _extract_tool_outputs(observation: str) -> Dict[str, Any]:
+        """Extract and clean tool outputs from observation"""
+        tool_outputs = {}
+        try:
+            if isinstance(observation, str):
+                data = json.loads(observation)
+                for key, value in data.items():
+                    if isinstance(value, str) and "llm_result" in value:
+                        try:
+                            tool_result = json.loads(value)
+                            tool_outputs[key] = tool_result
+                        except json.JSONDecodeError:
+                            # Keep the raw string if it is not valid JSON
+                            tool_outputs[key] = value
+        except json.JSONDecodeError:
+            # A malformed observation yields an empty mapping rather than an error
+            pass
+        return tool_outputs
+
+    @staticmethod
+    def format_message(message: str) -> Tuple[str, str]:
+        """Format agent message for both terminal and XML output"""
+        # Terminal format
+        terminal_output = message.strip()
+
+        # XML format
+        root = ET.Element("agent_response")
+        msg_elem = ET.SubElement(root, "message")
+        msg_elem.text = message.strip()
+
+        xml_output = ET.tostring(root, encoding='unicode')
+        return terminal_output, xml_output
+
+    @staticmethod
+    def format_error(error: str) -> Tuple[str, str]:
+        """Format error message for both terminal and XML output"""
+        # Terminal format
+        terminal_output = f"Error: {error}"
+
+        # XML format
+        root = ET.Element("agent_response")
+        error_elem = ET.SubElement(root, "error")
+        error_elem.text = error
+
+        xml_output = ET.tostring(root, encoding='unicode')
+        return terminal_output, xml_output
+
+    @staticmethod
+    def _clean_markdown(text: str) -> str:
+        """Clean markdown formatting from text"""
+        text = re.sub(r'```.*?```', '', text, flags=re.DOTALL)
+        text = re.sub(r'[*_`#]', '', text)
+        return re.sub(r'\n{3,}', '\n\n', text.strip())
\ No newline at end of file
diff --git a/start.sh b/start.sh
new file mode 100644
index 0000000000000000000000000000000000000000..8b0cc2c8bef82f1488a2dde464763d39952fb241
--- /dev/null
+++ b/start.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+# Start Nginx (daemonizes by default, so uvicorn can run in the foreground)
+nginx
+
+# Start the FastAPI application
+uvicorn api:app --host 0.0.0.0 --port 8444
\ No newline at end of file
diff --git a/xml_tag_guide.md b/xml_tag_guide.md
new file mode 100644
index 0000000000000000000000000000000000000000..cfc5d35bd87b56dbde4bd102086cd0676d86717c
--- /dev/null
+++ b/xml_tag_guide.md
@@ -0,0 +1,408 @@
+# XML Tag Structure Guide
+
+## Root Element
+- `<agent_response>` - Root element containing all response data
+
+## Primary Response Types
+1. `<thought>` - Agent's reasoning and analysis process
+   - Contains the agent's internal thinking process
+   - May appear empty (`<thought></thought>`) during processing
+   - Can contain full text analysis and reasoning
+
+2. `<message>` - Direct agent messages/responses
+   - Contains user-facing messages
+   - May appear empty (`<message></message>`) during processing
+   - Can be partial messages during streaming
+   - Often contains markdown formatting
+   - Common patterns:
+   ```xml
+   <message></message>
+
+   <message>partial response</message>
+
+   <message>complete response with markdown **formatting**</message>
+
+   <message>multi-line
+   response with
+   breaks</message>
+   ```
+   - Used for:
+     - Direct responses to user queries
+     - Status updates during processing
+     - Incremental output during streaming
+     - Formatted text with markdown
+     - Multi-line responses
+     - Interactive prompts and questions
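+
+   A minimal sketch of how a client might reassemble streamed `<message>`
+   chunks into a single string. It assumes each SSE `data:` payload is one
+   well-formed `<message>` element; the helper name `accumulate_messages`
+   is illustrative, not part of the API:
+
+   ```python
+   import xml.etree.ElementTree as ET
+
+   def accumulate_messages(chunks: list[str]) -> str:
+       """Concatenate the text of a sequence of <message> chunks."""
+       parts = []
+       for chunk in chunks:
+           elem = ET.fromstring(chunk)
+           # Chunks carry their own whitespace, so plain concatenation
+           # reconstructs the original text; empty markers contribute nothing.
+           if elem.tag == "message" and elem.text:
+               parts.append(elem.text)
+       return "".join(parts)
+
+   print(accumulate_messages([
+       "<message>I</message>",
+       "<message>'ll help you analyze </message>",
+       "<message>the data.</message>",
+   ]))
+   ```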
+3. `<error>` - Error messages and notifications
+   - Contains error details when something goes wrong
+   - Used for both system and processing errors
+
+## Observation Structure
+- `<observation>` - Contains tool outputs and analysis results
+  - May appear empty (`<observation></observation>`) during processing
+  - `<tool_outputs>` - Container for all tool outputs
+    - `<tool>` - Individual tool output container
+      - Required attribute: `name` matching ToolType constants
+
+## Tool-Specific Tags
+
+### Census Data (`get_census_data`)
+- `<census_tracts>` - Container for census tract data
+  - May appear empty if no tract data
+  - `<tract>` - Individual census tract
+    - Required attribute: `id` with tract identifier
+    - `<population>` - Population statistics
+    - `<income>` - Income statistics
+    - `<demographics>` - Demographic information
+    - Additional dynamic fields based on census data
+
+### Mermaid Diagrams (`mermaid_diagram`)
+- `<diagram>` - Contains the cleaned mermaid diagram code
+  - Contains raw mermaid syntax without markdown backticks
+
+### Health Data Tools
+Common structure for WISQARS, WONDER, NCHS tools:
+- `<tool>` - Health data container
+  - Dynamic category elements based on data structure
+  - Nested elements for hierarchical data
+
+### Generic Tool Output
+For tools like DuckDuckGo, Reddit News, PubMed, etc.:
+- `<content>` - Contains cleaned, formatted tool output text
+  - Markdown formatting removed
+  - Code blocks cleaned
+  - Whitespace normalized
+
+## Streaming Response Pattern
+Responses typically arrive in this sequence:
+
+1. Initial empty thought:
+```xml
+<thought></thought>
+```
+
+2. Message chunks (multiple):
+```xml
+<message>I</message>
+<message>'ll help you analyze</message>
+<message>the data.</message>
+```
+
+3. Empty message marker:
+```xml
+<message></message>
+```
+
+4. Thought statement:
+```xml
+<thought>Complete thought statement</thought>
+```
+
+5. Thought with observation (when using tools):
+```xml
+<thought>Complete thought statement</thought>
+<observation>
+  <tool_outputs>
+    <tool name="get_census_data">
+      <content>{"llmresult": "tool output data"}</content>
+    </tool>
+  </tool_outputs>
+</observation>
+```
+
+6. Empty thought transition:
+```xml
+<thought></thought>
+```
+
+7. Final message chunks:
+```xml
+<message>Final analysis</message>
+<message>and conclusions.</message>
+<message></message>
+```
+
+Each chunk is prefixed with `data: ` in the stream and represents a single step in the response generation process. Messages are often split into smaller chunks for streaming, while thoughts and observations typically come as complete units.
+
+## Supported Tool Types
+- `duckduckgo_search` - Web search results
+- `reddit_x_gnews_newswire_crunchbase` - News and social media data
+- `pubmed_search` - Medical research papers
+- `get_census_data` - Census statistics
+- `heatmap_code` - Visualization code
+- `mermaid_diagram` - Diagram generation
+- `wisqars` - Injury statistics
+- `wonder` - CDC WONDER database
+- `nchs` - National Center for Health Statistics
+- `onestep` - One-step analysis
+- `dqs_nhis_adult_summary_health_statistics` - Health statistics
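+
+A minimal client-side sketch tying the pieces above together: read the SSE
+stream line by line, strip the `data: ` prefix, and dispatch on the top-level
+tag. The local URL and the `handle_chunk` helper are illustrative assumptions,
+not part of the published API:
+
+```python
+import httpx
+import xml.etree.ElementTree as ET
+
+def handle_chunk(payload: str) -> None:
+    """Parse one SSE payload and route it by its top-level XML tag."""
+    try:
+        elem = ET.fromstring(payload)
+    except ET.ParseError:
+        return  # Skip empty, partial, or multi-element payloads
+    if elem.tag == "message":
+        print(elem.text or "", end="")  # Incremental user-facing text
+    elif elem.tag == "thought":
+        pass  # Internal reasoning; often hidden from end users
+    elif elem.tag == "error":
+        raise RuntimeError(elem.text or "unknown error")
+
+with httpx.stream(
+    "POST",
+    "http://localhost:7860/v1/agent",  # Assumed local deployment
+    json={"query": "hello", "stream": True, "user": "default_user"},
+    timeout=60.0,
+) as response:
+    for line in response.iter_lines():
+        if line.startswith("data: "):
+            handle_chunk(line[len("data: "):])
+```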
+
+
+EXAMPLE OUTPUT:
+
+(base) bdillon@YAM-GWLGVJQVCF cc-api %
+curl -X POST https://merry-contrast-substantially-initiative.trycloudflare.com/v1/agent \
+-H "Content-Type: application/json" \
+-d '{
+"query": "Show me the number of elderly (over 70) fall injuries in Illinois",
+"stream": true,
+"inputs": {},
+"files": [],
+"user": "default_user",
+"response_mode": "streaming"
+}'
+
+data: 
+
+data: I
+
+data: 'll help you analyze elderly fall injuries in Illinois using
+
+data: multiple data sources and create a comprehensive visualization of
+
+data: the findings.
+
+1. First, let's
+
+data: get census data for Illinois (FIPS code: 17
+
+data: ):
+
+data: 
+
+data: I'll help you analyze elderly fall injuries in Illinois using multiple data sources and create a comprehensive visualization of the findings.
+
+1. First, let's get census data for Illinois (FIPS code: 17):
+
+data: I'll help you analyze elderly fall injuries in Illinois using multiple data sources and create a comprehensive visualization of the findings.
+
+1. First, let's get census data for Illinois (FIPS code: 17):{"llmresult": "Bulleted List:\n\n Census Tract 9005: A high percentage (1.7%) of residents have ambulatory difficulty.\n Census Tract 101: A high percentage (1.0%) of residents have ambulatory difficulty.\n Census Tract 4008: A high percentage (1.0%) of residents have ambulatory difficulty.\n Census Tract 102: A high percentage (0.9%) of residents have ambulatory difficulty.\n Census Tract 8808: A high percentage (0.9%) of residents have ambulatory difficulty.\n\n\nNumbered Summary:\n\n1. Census Tract 9005: 4347 residents with ambulatory difficulty (1.7%), $58,828 median income.\n2. Census Tract 101: 3733 residents with ambulatory difficulty (1.0%), $54,120 median income.\n3. Census Tract 4008: 4783 residents with ambulatory difficulty (1.0%), $71,574 median income.\n4. Census Tract 102: 3306 residents with ambulatory difficulty (0.9%), $70,107 median income.\n5. Census Tract 8808: 3318 residents with ambulatory difficulty (0.9%), $71,563 median income.\n\n\nJSON Summary:\n\n\n"}tool response: {"llmresult": "Bulleted List:\n\n Census Tract 9005: A high percentage (1.7%) of residents have ambulatory difficulty.\n Census Tract 101: A high percentage (1.0%) of residents have ambulatory difficulty.\n Census Tract 4008: A high percentage (1.0%) of residents have ambulatory difficulty.\n Census Tract 102: A high percentage (0.9%) of residents have ambulatory difficulty.\n Census Tract 8808: A high percentage (0.9%) of residents have ambulatory difficulty.\n\n\nNumbered Summary:\n\n1. Census Tract 9005: 4347 residents with ambulatory difficulty (1.7%), $58,828 median income.\n2. Census Tract 101: 3733 residents with ambulatory difficulty (1.0%), $54,120 median income.\n3. Census Tract 4008: 4783 residents with ambulatory difficulty (1.0%), $71,574 median income.\n4. Census Tract 102: 3306 residents with ambulatory difficulty (0.9%), $70,107 median income.\n5. Census Tract 8808: 3318 residents with ambulatory difficulty (0.9%), $71,563 median income.\n\n\nJSON Summary:\n\n\n"}.
+
+data: 
+
+data: Let
+
+data: me query the WISQARS dataset
+
+data: specifically for fall injuries:
+
+data: 
+
+data: Let me query the WISQARS dataset specifically for fall injuries:
+
+data: Let me query the WISQARS dataset specifically for fall injuries:
+
+data: 
+
+data: Let
+
+data: me analyze the fall injuries data for elderly
+
+data: individuals in Illinois by combining census data an
+
+data: d WISQARS fall injury data. I'll create
+
+data: a comprehensive visualization to help understand the distribution.
+
+data: 
+
+data: Let me analyze the fall injuries data for elderly individuals in Illinois by combining census data and WISQARS fall injury data. I'll create a comprehensive visualization to help understand the distribution.
+
+data: Let me analyze the fall injuries data for elderly individuals in Illinois by combining census data and WISQARS fall injury data. I'll create a comprehensive visualization to help understand the distribution.
+
+data: 
+
+data: Based on the data
+
+data: collected, I can provide an analysis of elderly
+
+data: fall injuries and ambulatory difficulties in Illinois:
+
+data: 1. 
+**Census Data Analysis**
+
+data: - The highest concentration of ambulatory difficulty is
+
+data: in Census Tract 9005 with 1.7
+
+data: % of residents affected
+- Four other
+
+data: tracts show significant ambulatory difficulty rates ranging
+
+data: from 0.9% to 1.
+
+data: 0%
+- There's a correlation between areas with higher ambulatory difficulty
+
+data: and moderate median incomes ($54,120-
+
+data: $71,574)
+
+2. **Key
+
+data: Findings by Census Tract**:
+   -
+
+data: Tract 9005: 4
+
+data: ,347 residents with disabilities (highest concentration
+
+data: )
+   - Tract 4008: 4,
+
+data: 783 residents with disabilities
+   - Tract
+
+data: 101: 3,733 residents with disabilities
+
+data: - Tract 102: 3,
+
+data: 306 residents with disabilities
+   - Tract
+
+data: 8808: 3,318
+
+data: residents with disabilities
+
+3. **Risk
+
+data: Factors**:
+   - Higher concent
+
+data: rations of elderly populations in specific census tracts
+   -
+
+data: Varying income levels that may affect access to prevent
+
+data: ive care
+   - Geographic distribution showing
+
+data: clusters of high-risk areas
+
+4. 
+
+data: **Visualization**:
+I've include
+
+data: d a pie chart showing the distribution of ambulatory difficulty percentages across the top
+
+data: census tracts in Illinois.
+
+```mermaid
+pie
+
+data: title Illinois Census Tracts -
+
+data: Ambulatory Difficulty Distribution
+    "Tract
+
+data: 9005 (1.7%)
+
+data: ": 31
+    "Tract
+
+data: 101 (1.0%)": 18
+
+data: "Tract 4008 (
+
+data: 1.0%)": 18
+    "Tract
+
+data: 102 (0.9%)":
+
+data: 16
+    "Tract 8808
+
+data: (0.9%)": 16
+```
+
+5. **
+
+data: Recommendations for Fall Prevention**:
+   - Focus
+
+data: resources on Census Tract 9005,
+
+data: which shows the highest concentration of ambulatory difficulties
+
+data: - Implement targeted intervention programs in areas
+
+data: with higher elderly populations
+   - Consider income levels
+
+data: when designing accessibility and support services
+
+To
+
+data: get more specific data about fall injuries, I
+
+data: recommend:
+1. Consulting local healthcare
+
+data: facilities for actual fall incident reports
+2. Working
+
+data: with senior living communities in these
+
+data: census tracts
+3. Conducting
+
+data: detailed assessments of environmental factors in high-risk areas
+
+data: Would you like more detailed information about any specific aspect of this
+
+data: analysis or additional data about particular census tracts?
+
+data: 
+
+data: Based on the data collected, I can provide an analysis of elderly fall injuries and ambulatory difficulties in Illinois:
+
+1. **Census Data Analysis**
+- The highest concentration of ambulatory difficulty is in Census Tract 9005 with 1.7% of residents affected
+- Four other tracts show significant ambulatory difficulty rates ranging from 0.9% to 1.0%
+- There's a correlation between areas with higher ambulatory difficulty and moderate median incomes ($54,120-$71,574)
+
+2. **Key Findings by Census Tract**:
+   - Tract 9005: 4,347 residents with disabilities (highest concentration)
+   - Tract 4008: 4,783 residents with disabilities
+   - Tract 101: 3,733 residents with disabilities
+   - Tract 102: 3,306 residents with disabilities
+   - Tract 8808: 3,318 residents with disabilities
+
+3. **Risk Factors**:
+   - Higher concentrations of elderly populations in specific census tracts
+   - Varying income levels that may affect access to preventive care
+   - Geographic distribution showing clusters of high-risk areas
+
+4. 
+**Visualization**:
+I've included a pie chart showing the distribution of ambulatory difficulty percentages across the top census tracts in Illinois.
+
+```mermaid
+pie
+    title Illinois Census Tracts - Ambulatory Difficulty Distribution
+    "Tract 9005 (1.7%)": 31
+    "Tract 101 (1.0%)": 18
+    "Tract 4008 (1.0%)": 18
+    "Tract 102 (0.9%)": 16
+    "Tract 8808 (0.9%)": 16
+```
+
+5. **Recommendations for Fall Prevention**:
+   - Focus resources on Census Tract 9005, which shows the highest concentration of ambulatory difficulties
+   - Implement targeted intervention programs in areas with higher elderly populations
+   - Consider income levels when designing accessibility and support services
+
+To get more specific data about fall injuries, I recommend:
+1. Consulting local healthcare facilities for actual fall incident reports
+2. Working with senior living communities in these census tracts
+3. Conducting detailed assessments of environmental factors in high-risk areas
+
+Would you like more detailed information about any specific aspect of this analysis or additional data about particular census tracts?
\ No newline at end of file