Severian committed on
Commit
995af0f
1 Parent(s): fd0612c

Upload 81 files

This view is limited to 50 files because it contains too many changes; see the raw diff for the rest.
Files changed (50)
  1. Dockerfile +15 -0
  2. api.py +346 -0
  3. api_docs.md +154 -0
  4. dify_client_python/.DS_Store +0 -0
  5. dify_client_python/LICENSE +21 -0
  6. dify_client_python/MANIFEST.in +1 -0
  7. dify_client_python/README.md +155 -0
  8. dify_client_python/build.sh +9 -0
  9. dify_client_python/build/lib/dify_client/__init__.py +1 -0
  10. dify_client_python/build/lib/dify_client/_clientx.py +660 -0
  11. dify_client_python/build/lib/dify_client/errors.py +132 -0
  12. dify_client_python/build/lib/dify_client/models/__init__.py +7 -0
  13. dify_client_python/build/lib/dify_client/models/base.py +93 -0
  14. dify_client_python/build/lib/dify_client/models/chat.py +29 -0
  15. dify_client_python/build/lib/dify_client/models/completion.py +22 -0
  16. dify_client_python/build/lib/dify_client/models/feedback.py +21 -0
  17. dify_client_python/build/lib/dify_client/models/file.py +15 -0
  18. dify_client_python/build/lib/dify_client/models/stream.py +186 -0
  19. dify_client_python/build/lib/dify_client/models/workflow.py +91 -0
  20. dify_client_python/build/lib/dify_client/utils/__init__.py +1 -0
  21. dify_client_python/build/lib/dify_client/utils/_common.py +7 -0
  22. dify_client_python/dify_client/.DS_Store +0 -0
  23. dify_client_python/dify_client/__init__.py +1 -0
  24. dify_client_python/dify_client/__pycache__/__init__.cpython-310.pyc +0 -0
  25. dify_client_python/dify_client/__pycache__/__init__.cpython-312.pyc +0 -0
  26. dify_client_python/dify_client/__pycache__/_clientx.cpython-310.pyc +0 -0
  27. dify_client_python/dify_client/__pycache__/_clientx.cpython-312.pyc +0 -0
  28. dify_client_python/dify_client/__pycache__/errors.cpython-310.pyc +0 -0
  29. dify_client_python/dify_client/__pycache__/errors.cpython-312.pyc +0 -0
  30. dify_client_python/dify_client/_clientx.py +694 -0
  31. dify_client_python/dify_client/errors.py +134 -0
  32. dify_client_python/dify_client/models/__init__.py +7 -0
  33. dify_client_python/dify_client/models/__pycache__/__init__.cpython-310.pyc +0 -0
  34. dify_client_python/dify_client/models/__pycache__/__init__.cpython-312.pyc +0 -0
  35. dify_client_python/dify_client/models/__pycache__/base.cpython-310.pyc +0 -0
  36. dify_client_python/dify_client/models/__pycache__/base.cpython-312.pyc +0 -0
  37. dify_client_python/dify_client/models/__pycache__/chat.cpython-310.pyc +0 -0
  38. dify_client_python/dify_client/models/__pycache__/chat.cpython-312.pyc +0 -0
  39. dify_client_python/dify_client/models/__pycache__/completion.cpython-310.pyc +0 -0
  40. dify_client_python/dify_client/models/__pycache__/completion.cpython-312.pyc +0 -0
  41. dify_client_python/dify_client/models/__pycache__/feedback.cpython-310.pyc +0 -0
  42. dify_client_python/dify_client/models/__pycache__/feedback.cpython-312.pyc +0 -0
  43. dify_client_python/dify_client/models/__pycache__/file.cpython-310.pyc +0 -0
  44. dify_client_python/dify_client/models/__pycache__/file.cpython-312.pyc +0 -0
  45. dify_client_python/dify_client/models/__pycache__/stream.cpython-310.pyc +0 -0
  46. dify_client_python/dify_client/models/__pycache__/stream.cpython-312.pyc +0 -0
  47. dify_client_python/dify_client/models/__pycache__/workflow.cpython-310.pyc +0 -0
  48. dify_client_python/dify_client/models/__pycache__/workflow.cpython-312.pyc +0 -0
  49. dify_client_python/dify_client/models/base.py +93 -0
  50. dify_client_python/dify_client/models/chat.py +29 -0
Dockerfile ADDED
@@ -0,0 +1,15 @@
+ FROM python:3.9-slim
+
+ WORKDIR /code
+
+ COPY ./requirements.txt /code/requirements.txt
+ COPY ./api.py /code/api.py
+ COPY ./json_parser.py /code/json_parser.py
+ COPY ./logger_config.py /code/logger_config.py
+ COPY ./response_formatter.py /code/response_formatter.py
+
+ RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt
+
+ EXPOSE 7860
+
+ CMD ["uvicorn", "api:app", "--host", "0.0.0.0", "--port", "7860"]
api.py ADDED
@@ -0,0 +1,346 @@
+ from fastapi import FastAPI, HTTPException, Request
+ from fastapi.middleware.cors import CORSMiddleware
+ from typing import Dict, List, Optional, Union, Any
+ from pydantic import BaseModel, Field
+ from datetime import datetime
+ import logging
+ import json
+ import os
+ from dotenv import load_dotenv
+ from dify_client_python.dify_client import models
+ from sse_starlette.sse import EventSourceResponse
+ import httpx
+ from json_parser import SSEParser
+ from logger_config import setup_logger
+ from fastapi.responses import StreamingResponse
+ from fastapi.responses import JSONResponse
+ from response_formatter import ResponseFormatter
+ import traceback
+
+ # Load environment variables
+ load_dotenv()
+
+ # Configure logging
+ logging.basicConfig(
+     level=logging.INFO,
+     format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
+ )
+ logger = logging.getLogger(__name__)
+
+ class AgentOutput(BaseModel):
+     """Structured output from agent processing"""
+     thought_content: str
+     observation: Optional[str]
+     tool_outputs: List[Dict]
+     citations: List[Dict]
+     metadata: Dict
+     raw_response: str
+
+ class AgentRequest(BaseModel):
+     """Enhanced request model with additional parameters"""
+     query: str
+     conversation_id: Optional[str] = None
+     stream: bool = True
+     inputs: Dict = {}
+     files: List = []
+     user: str = "default_user"
+     response_mode: str = "streaming"
+
+ class AgentProcessor:
+     def __init__(self, api_key: str):
+         self.api_key = api_key
+         self.api_base = "https://rag-engine.go-yamamoto.com/v1"
+         self.formatter = ResponseFormatter()
+         self.client = httpx.AsyncClient(timeout=60.0)
+         self.logger = setup_logger("agent_processor")
+
+     async def log_request_details(
+         self,
+         request: AgentRequest,
+         start_time: datetime
+     ) -> None:
+         """Log detailed request information"""
+         self.logger.debug(
+             "Request details: \n"
+             f"Query: {request.query}\n"
+             f"User: {request.user}\n"
+             f"Conversation ID: {request.conversation_id}\n"
+             f"Stream mode: {request.stream}\n"
+             f"Start time: {start_time}\n"
+             f"Inputs: {request.inputs}\n"
+             f"Files: {len(request.files)} files attached"
+         )
+
+     async def log_error(
+         self,
+         error: Exception,
+         context: Optional[Dict] = None
+     ) -> None:
+         """Log detailed error information"""
+         error_msg = (
+             f"Error type: {type(error).__name__}\n"
+             f"Error message: {str(error)}\n"
+             f"Stack trace:\n{traceback.format_exc()}\n"
+         )
+         if context:
+             error_msg += f"Context:\n{json.dumps(context, indent=2)}"
+
+         self.logger.error(error_msg)
+
+     async def cleanup(self):
+         """Cleanup method to properly close client"""
+         await self.client.aclose()
+
+     async def process_stream(self, request: AgentRequest):
+         start_time = datetime.now()
+         await self.log_request_details(request, start_time)
+
+         headers = {
+             "Authorization": f"Bearer {self.api_key}",
+             "Content-Type": "application/json",
+             "Accept": "text/event-stream"
+         }
+
+         chat_request = {
+             "query": request.query,
+             "inputs": request.inputs,
+             "response_mode": "streaming" if request.stream else "blocking",
+             "user": request.user,
+             "conversation_id": request.conversation_id,
+             "files": request.files
+         }
+
+         async def event_generator():
+             parser = SSEParser()
+             citations = []
+             metadata = {}
+
+             try:
+                 async with self.client.stream(
+                     "POST",
+                     f"{self.api_base}/chat-messages",
+                     headers=headers,
+                     json=chat_request
+                 ) as response:
+                     self.logger.debug(
+                         f"Stream connection established\n"
+                         f"Status: {response.status_code}\n"
+                         f"Headers: {dict(response.headers)}"
+                     )
+
+                     buffer = ""
+                     async for line in response.aiter_lines():
+                         if not line.strip():
+                             continue
+
+                         self.logger.debug(f"Raw SSE line: {line}")
+
+                         if "data:" in line:
+                             try:
+                                 data = line.split("data:", 1)[1].strip()
+                                 parsed = json.loads(data)
+
+                                 if parsed.get("event") == "message_end":
+                                     citations = parsed.get("retriever_resources", [])
+                                     metadata = parsed.get("metadata", {})
+                                     self.logger.debug(
+                                         f"Message end event:\n"
+                                         f"Citations: {citations}\n"
+                                         f"Metadata: {metadata}"
+                                     )
+
+                                 formatted = self.format_terminal_output(
+                                     parsed,
+                                     citations=citations,
+                                     metadata=metadata
+                                 )
+                                 if formatted:
+                                     self.logger.info(formatted)
+                             except Exception as e:
+                                 await self.log_error(
+                                     e,
+                                     {"line": line, "event": "parse_data"}
+                                 )
+
+                         buffer += line + "\n"
+
+                         if line.startswith("data:") or buffer.strip().endswith("}"):
+                             try:
+                                 processed_response = parser.parse_sse_event(buffer)
+                                 if processed_response and isinstance(processed_response, dict):
+                                     cleaned_response = self.clean_response(processed_response)
+                                     if cleaned_response:
+                                         xml_content = cleaned_response.get("content", "")
+                                         yield f"data: {xml_content}\n\n"
+                             except Exception as parse_error:
+                                 await self.log_error(
+                                     parse_error,
+                                     {"buffer": buffer, "event": "process_buffer"}
+                                 )
+                                 error_xml = (
+                                     f"<agent_response>"
+                                     f"<error>{str(parse_error)}</error>"
+                                     f"</agent_response>"
+                                 )
+                                 yield f"data: {error_xml}\n\n"
+                             finally:
+                                 buffer = ""
+
+             except httpx.ConnectError as e:
+                 await self.log_error(e, {"event": "connection_error"})
+                 error_xml = (
+                     f"<agent_response>"
+                     f"<error>Connection error: {str(e)}</error>"
+                     f"</agent_response>"
+                 )
+                 yield f"data: {error_xml}\n\n"
+             except Exception as e:
+                 await self.log_error(e, {"event": "stream_error"})
+                 error_xml = (
+                     f"<agent_response>"
+                     f"<error>Streaming error: {str(e)}</error>"
+                     f"</agent_response>"
+                 )
+                 yield f"data: {error_xml}\n\n"
+             finally:
+                 end_time = datetime.now()
+                 duration = (end_time - start_time).total_seconds()
+                 self.logger.info(f"Request completed in {duration:.2f} seconds")
+
+         return StreamingResponse(
+             event_generator(),
+             media_type="text/event-stream",
+             headers={
+                 "Cache-Control": "no-cache",
+                 "Connection": "keep-alive",
+                 "X-Accel-Buffering": "no",
+                 "Access-Control-Allow-Origin": "*"
+             }
+         )
+
+     def format_terminal_output(
+         self,
+         response: Dict,
+         citations: List[Dict] = None,
+         metadata: Dict = None
+     ) -> Optional[str]:
+         """Format response for terminal output"""
+         event_type = response.get("event")
+
+         if event_type == "agent_thought":
+             thought = response.get("thought", "")
+             observation = response.get("observation", "")
+             terminal_output, _ = self.formatter.format_thought(
+                 thought,
+                 observation,
+                 citations=citations,
+                 metadata=metadata
+             )
+             return terminal_output
+
+         elif event_type == "agent_message":
+             message = response.get("answer", "")
+             terminal_output, _ = self.formatter.format_message(message)
+             return terminal_output
+
+         elif event_type == "error":
+             error = response.get("error", "Unknown error")
+             terminal_output, _ = self.formatter.format_error(error)
+             return terminal_output
+
+         return None
+
+     def clean_response(self, response: Dict) -> Optional[Dict]:
+         """Clean and transform the response for frontend consumption"""
+         try:
+             event_type = response.get("event")
+             if not event_type:
+                 return None
+
+             # Handle different event types
+             if event_type == "agent_thought":
+                 thought = response.get("thought", "")
+                 observation = response.get("observation", "")
+                 _, xml_output = self.formatter.format_thought(thought, observation)
+                 return {
+                     "type": "thought",
+                     "content": xml_output
+                 }
+
+             elif event_type == "agent_message":
+                 message = response.get("answer", "")
+                 _, xml_output = self.formatter.format_message(message)
+                 return {
+                     "type": "message",
+                     "content": xml_output
+                 }
+
+             elif event_type == "error":
+                 error = response.get("error", "Unknown error")
+                 _, xml_output = self.formatter.format_error(error)
+                 return {
+                     "type": "error",
+                     "content": xml_output
+                 }
+
+             return None
+         except Exception as e:
+             logger.error(f"Error cleaning response: {str(e)}")
+             return None
+
+ # Initialize FastAPI app
+ app = FastAPI()
+ agent_processor = None
+
+ # Add CORS middleware
+ app.add_middleware(
+     CORSMiddleware,
+     allow_origins=["*"],
+     allow_credentials=True,
+     allow_methods=["*"],
+     allow_headers=["*"],
+ )
+
+ @app.on_event("startup")
+ async def startup_event():
+     global agent_processor
+     api_key = os.getenv("DIFY_API_KEY", "app-kVHTrZzEmFXEBfyXOi4rro7M")
+     agent_processor = AgentProcessor(api_key=api_key)
+
+ @app.on_event("shutdown")
+ async def shutdown_event():
+     global agent_processor
+     if agent_processor:
+         await agent_processor.cleanup()
+
+ @app.post("/v1/agent")
+ async def process_agent_request(request: AgentRequest):
+     try:
+         logger.info(f"Processing agent request: {request.query}")
+         return await agent_processor.process_stream(request)
+
+     except Exception as e:
+         logger.error(f"Error in agent request processing: {e}", exc_info=True)
+         raise HTTPException(status_code=500, detail=str(e))
+
+ @app.middleware("http")
+ async def error_handling_middleware(request: Request, call_next):
+     try:
+         response = await call_next(request)
+         return response
+     except Exception as e:
+         logger.error(f"Unhandled error: {str(e)}", exc_info=True)
+         return JSONResponse(
+             status_code=500,
+             content={"error": "Internal server error occurred"}
+         )
+
+ # Add host and port parameters to the launch
+ if __name__ == "__main__":
+     import uvicorn
+     uvicorn.run(
+         "api:app",
+         host="0.0.0.0",
+         port=8224,
+         reload=True
+     )
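
Taken together, api.py proxies a Dify chat stream and re-emits each event as an XML fragment inside an SSE `data:` line. A minimal consumer sketch of that contract (hedged: it assumes the service is running locally on port 8224, matching the `uvicorn.run` call in the `__main__` block, and that `httpx` is installed):

```python
import asyncio
import httpx

async def main():
    # Hypothetical local endpoint; the path matches @app.post("/v1/agent") above.
    url = "http://localhost:8224/v1/agent"
    payload = {"query": "Hello!", "user": "demo_user", "stream": True}
    async with httpx.AsyncClient(timeout=60.0) as client:
        async with client.stream("POST", url, json=payload) as response:
            # Each SSE line carries one <agent_response> XML fragment
            # produced by AgentProcessor.clean_response().
            async for line in response.aiter_lines():
                if line.startswith("data: "):
                    print(line[len("data: "):])

asyncio.run(main())
```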
api_docs.md ADDED
@@ -0,0 +1,154 @@
+ POST /chat-messages
+ Send Chat Message
+ Send a request to the chat application.
+
+ Request Body
+ - query (string): User Input/Question content.
+ - inputs (object): Allows the entry of various variable values defined by the App. The inputs parameter contains multiple key/value pairs, with each key corresponding to a specific variable and each value being the specific value for that variable. Default {}.
+ - response_mode (string): The mode of response return, supporting:
+   - streaming: Streaming mode (recommended); implements a typewriter-like output through SSE (Server-Sent Events).
+   - blocking: Blocking mode; returns the result after execution is complete. (Requests may be interrupted if the process is long.) Due to Cloudflare restrictions, the request will be interrupted without a return after 100 seconds. Note: blocking mode is not supported in Agent Assistant mode.
+ - user (string): User identifier, used to define the identity of the end-user for retrieval and statistics. Should be uniquely defined by the developer within the application.
+ - conversation_id (string): Conversation ID. To continue the conversation based on previous chat records, pass the previous message's conversation_id.
+ - files (array[object]): File list, suitable for inputting files (images) combined with text understanding and answering questions, available only when the model supports Vision capability.
+   - type (string): Supported type: image (currently only image is supported).
+   - transfer_method (string): Transfer method, remote_url for image URL / local_file for file upload.
+   - url (string): Image URL (when the transfer method is remote_url).
+   - upload_file_id (string): Uploaded file ID, which must be obtained in advance through the File Upload API (when the transfer method is local_file).
+ - auto_generate_name (bool): Auto-generate title, default is true. If set to false, async title generation can be achieved by calling the conversation rename API and setting auto_generate to true.
+
+ Response
+ When response_mode is blocking, returns a ChatCompletionResponse object. When response_mode is streaming, returns a ChunkChatCompletionResponse stream.
+
+ ChatCompletionResponse
+ Returns the complete App result; Content-Type is application/json.
+ - message_id (string): Unique message ID
+ - conversation_id (string): Conversation ID
+ - mode (string): App mode, fixed as chat
+ - answer (string): Complete response content
+ - metadata (object): Metadata
+   - usage (Usage): Model usage information
+   - retriever_resources (array[RetrieverResource]): Citation and Attribution List
+ - created_at (int): Message creation timestamp, e.g., 1705395332
+
+ ChunkChatCompletionResponse
+ Returns the stream chunks outputted by the App; Content-Type is text/event-stream. Each streaming chunk starts with data:, and chunks are separated by two newline characters \n\n, as shown below:
+
+ data: {"event": "message", "task_id": "900bbd43-dc0b-4383-a372-aa6e6c414227", "id": "663c5084-a254-4040-8ad3-51f2a3c1a77c", "answer": "Hi", "created_at": 1705398420}\n\n
+
+ The structure of the streaming chunks varies depending on the event:
+
+ event: message. LLM text chunk event, i.e., the complete text is output in a chunked fashion.
+   - task_id (string): Task ID, used for request tracking and the Stop Generate API below
+   - message_id (string): Unique message ID
+   - conversation_id (string): Conversation ID
+   - answer (string): LLM returned text chunk content
+   - created_at (int): Creation timestamp, e.g., 1705395332
+ event: agent_message. LLM text chunk event; with Agent Assistant enabled, the complete text is output in a chunked fashion (only supported in Agent mode).
+   - task_id (string): Task ID, used for request tracking and the Stop Generate API below
+   - message_id (string): Unique message ID
+   - conversation_id (string): Conversation ID
+   - answer (string): LLM returned text chunk content
+   - created_at (int): Creation timestamp, e.g., 1705395332
+ event: tts_message. TTS audio stream event, i.e., speech synthesis output. The content is an audio block in MP3 format, encoded as a base64 string; to play it, simply decode the base64 and feed it into a player. (This message is available only when auto-play is enabled.)
+   - task_id (string): Task ID, used for request tracking and the stop response interface below
+   - message_id (string): Unique message ID
+   - audio (string): The audio after speech synthesis, encoded as base64 text content; to play it, simply decode the base64 and feed it into a player
+   - created_at (int): Creation timestamp, e.g., 1705395332
+ event: tts_message_end. TTS audio stream end event; receiving this event indicates the end of the audio stream.
+   - task_id (string): Task ID, used for request tracking and the stop response interface below
+   - message_id (string): Unique message ID
+   - audio (string): The end event has no audio, so this is an empty string
+   - created_at (int): Creation timestamp, e.g., 1705395332
+ event: agent_thought. Thought of the Agent; contains the thought of the LLM and the input and output of tool calls (only supported in Agent mode).
+   - id (string): Agent thought ID; every iteration has a unique agent thought ID
+   - task_id (string): Task ID, used for request tracking and the Stop Generate API below
+   - message_id (string): Unique message ID
+   - position (int): Position of the current agent thought; each message may have multiple thoughts in order
+   - thought (string): What the LLM is thinking about
+   - observation (string): Response from tool calls
+   - tool (string): A list of the tools called, split by ;
+   - tool_input (string): Input of tools in JSON format, e.g., {"dalle3": {"prompt": "a cute cat"}}
+   - created_at (int): Creation timestamp, e.g., 1705395332
+   - message_files (array[string]): Refer to the message_file event
+   - file_id (string): File ID
+   - conversation_id (string): Conversation ID
+ event: message_file. Message file event; a new file has been created by a tool.
+   - id (string): File unique ID
+   - type (string): File type; only "image" is currently allowed
+   - belongs_to (string): Belongs to; it will only be 'assistant' here
+   - url (string): Remote URL of the file
+   - conversation_id (string): Conversation ID
+ event: message_end. Message end event; receiving this event means streaming has ended.
+   - task_id (string): Task ID, used for request tracking and the Stop Generate API below
+   - message_id (string): Unique message ID
+   - conversation_id (string): Conversation ID
+   - metadata (object): Metadata
+     - usage (Usage): Model usage information
+     - retriever_resources (array[RetrieverResource]): Citation and Attribution List
+ event: message_replace. Message content replacement event. When output content moderation is enabled, if the content is flagged, the message content will be replaced with a preset reply through this event.
+   - task_id (string): Task ID, used for request tracking and the Stop Generate API below
+   - message_id (string): Unique message ID
+   - conversation_id (string): Conversation ID
+   - answer (string): Replacement content (directly replaces all LLM reply text)
+   - created_at (int): Creation timestamp, e.g., 1705395332
+ event: error. Exceptions that occur during the streaming process are output as stream events; receiving an error event ends the stream.
+   - task_id (string): Task ID, used for request tracking and the Stop Generate API below
+   - message_id (string): Unique message ID
+   - status (int): HTTP status code
+   - code (string): Error code
+   - message (string): Error message
+ event: ping. Ping event every 10 seconds to keep the connection alive.
+
+ Errors
+ - 404, Conversation does not exist
+ - 400, invalid_param, abnormal parameter input
+ - 400, app_unavailable, App configuration unavailable
+ - 400, provider_not_initialize, no available model credential configuration
+ - 400, provider_quota_exceeded, model invocation quota insufficient
+ - 400, model_currently_not_support, current model unavailable
+ - 400, completion_request_error, text generation failed
+ - 500, internal server error
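
To make the streaming contract above concrete, here is a small sketch of a raw /chat-messages call that dispatches on the event field. The base URL and API key are placeholders, and only a few of the documented events are handled:

```python
import json
import httpx

API_BASE = "https://api.dify.ai/v1"  # placeholder base URL
API_KEY = "your-api-key"             # placeholder key

payload = {
    "query": "What can you do?",
    "inputs": {},
    "response_mode": "streaming",
    "user": "demo-user",
}

with httpx.stream(
    "POST",
    f"{API_BASE}/chat-messages",
    headers={"Authorization": f"Bearer {API_KEY}"},
    json=payload,
    timeout=100,
) as response:
    for line in response.iter_lines():
        if not line.startswith("data:"):
            continue  # skip blank separators and non-data SSE lines
        chunk = json.loads(line[len("data:"):].strip())
        event = chunk.get("event")
        if event in ("message", "agent_message"):
            print(chunk["answer"], end="", flush=True)
        elif event == "message_end":
            print("\ncitations:", chunk.get("metadata", {}).get("retriever_resources", []))
        elif event == "error":
            raise RuntimeError(f"{chunk.get('code')}: {chunk.get('message')}")
```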
dify_client_python/.DS_Store ADDED
Binary file (6.15 kB).
dify_client_python/LICENSE ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2024 haoyuhu
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
dify_client_python/MANIFEST.in ADDED
@@ -0,0 +1 @@
+ recursive-include dify_client *.py
dify_client_python/README.md ADDED
@@ -0,0 +1,155 @@
+ # dify-client-python
+
+ Welcome to the `dify-client-python` repository! This Python package provides a convenient and powerful interface to
+ interact with the Dify API, enabling developers to integrate a wide range of features into their applications with ease.
+
+ ## Main Features
+
+ * **Synchronous and Asynchronous Support**: The client offers both synchronous and asynchronous methods, allowing for
+   flexible integration into various Python codebases and frameworks.
+ * **Stream and Non-stream Support**: Seamlessly work with both streaming and non-streaming endpoints of the Dify API for
+   real-time and batch processing use cases.
+ * **Comprehensive Endpoint Coverage**: Supporting completion, chat, workflows, feedback, file uploads, and more, the
+   client covers all available Dify API endpoints.
+
+ ## Installation
+
+ Before using the `dify-client-python` client, you'll need to install it. You can easily install it using `pip`:
+
+ ```bash
+ pip install dify-client-python
+ ```
+
+ ## Quick Start
+
+ Here's a quick example of how you can use the Dify Client to send a chat message.
+
+ ```python
+ import uuid
+ from dify_client import Client, models
+
+ # Initialize the client with your API key
+ client = Client(
+     api_key="your-api-key",
+     api_base="http://localhost/v1",
+ )
+ user = str(uuid.uuid4())
+
+ # Create a blocking chat request
+ blocking_chat_req = models.ChatRequest(
+     query="Hi, dify-client-python!",
+     inputs={"city": "Beijing"},
+     user=user,
+     response_mode=models.ResponseMode.BLOCKING,
+ )
+
+ # Send the chat message
+ chat_response = client.chat_messages(blocking_chat_req, timeout=60.)
+ print(chat_response)
+
+ # Create a streaming chat request
+ streaming_chat_req = models.ChatRequest(
+     query="Hi, dify-client-python!",
+     inputs={"city": "Beijing"},
+     user=user,
+     response_mode=models.ResponseMode.STREAMING,
+ )
+
+ # Send the chat message
+ for chunk in client.chat_messages(streaming_chat_req, timeout=60.):
+     print(chunk)
+ ```
+
+ For asynchronous operations, use the `AsyncClient` in a similar fashion:
+
+ ```python
+ import asyncio
+ import uuid
+
+ from dify_client import AsyncClient, models
+
+ # Initialize the async client with your API key
+ async_client = AsyncClient(
+     api_key="your-api-key",
+     api_base="http://localhost/v1",
+ )
+
+
+ # Define an asynchronous function to send a chat message with BLOCKING ResponseMode
+ async def send_chat_message():
+     user = str(uuid.uuid4())
+     # Create a blocking chat request
+     blocking_chat_req = models.ChatRequest(
+         query="Hi, dify-client-python!",
+         inputs={"city": "Beijing"},
+         user=user,
+         response_mode=models.ResponseMode.BLOCKING,
+     )
+     chat_response = await async_client.achat_messages(blocking_chat_req, timeout=60.)
+     print(chat_response)
+
+
+ # Define an asynchronous function to send a chat message with STREAMING ResponseMode
+ async def send_chat_message_stream():
+     user = str(uuid.uuid4())
+     # Create a streaming chat request
+     streaming_chat_req = models.ChatRequest(
+         query="Hi, dify-client-python!",
+         inputs={"city": "Beijing"},
+         user=user,
+         response_mode=models.ResponseMode.STREAMING,
+     )
+     async for chunk in await async_client.achat_messages(streaming_chat_req, timeout=60.):
+         print(chunk)
+
+
+ # Run both asynchronous functions
+ async def main():
+     await asyncio.gather(send_chat_message(), send_chat_message_stream())
+
+
+ asyncio.run(main())
+ ```
+
+ ## Documentation
+
+ For detailed information on all the functionalities and how to use each endpoint, please refer to the official Dify API
+ documentation. This will provide you with comprehensive guidance on request and response structures, error handling, and
+ other important details.
+
+ ## Contributing
+
+ Contributions are welcome! If you would like to contribute to `dify-client-python`, please feel free to make a pull
+ request or open an issue to discuss potential changes.
+
+ ## License
+
+ This project is licensed under the MIT License - see the LICENSE file for details.
+
+ ```text
+ MIT License
+
+ Copyright (c) 2024 haoyuhu
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
+ ```
+
+ ## Support
+
+ If you encounter any issues or have questions regarding the usage of this client, please reach out to the Dify Client
+ support team.
+
+ Happy coding! 🚀
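
The README only demonstrates chat; as a further illustration, here is a hedged sketch of the file-upload endpoint using the client shipped in this commit. The tuple form (filename, file-like object, mime type) comes from the `upload_files` docstring in `_clientx.py`, but the exact fields of `UploadFileRequest` are an assumption; check `models/file.py` for the real schema:

```python
from dify_client import Client, models

client = Client(api_key="your-api-key", api_base="http://localhost/v1")

# The user field is assumed from the docstring ("the user who is uploading");
# verify against models.UploadFileRequest before relying on this.
upload_req = models.UploadFileRequest(user="demo-user")
with open("cat.png", "rb") as f:
    uploaded = client.upload_files(("cat.png", f, "image/png"), upload_req)
print(uploaded)
```

The returned upload ID can then be passed as upload_file_id in the files array of a chat request, per the api_docs.md section above.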
dify_client_python/build.sh ADDED
@@ -0,0 +1,9 @@
+ #!/bin/bash
+
+ set -e
+
+ rm -rf build dist *.egg-info
+
+ pip install setuptools wheel twine
+ python setup.py sdist bdist_wheel
+ twine upload dist/*
dify_client_python/build/lib/dify_client/__init__.py ADDED
@@ -0,0 +1 @@
+ from ._clientx import Client, AsyncClient
dify_client_python/build/lib/dify_client/_clientx.py ADDED
@@ -0,0 +1,660 @@
1
+ from typing import Optional, Any, Mapping, Iterator, AsyncIterator, Union, Dict
2
+
3
+ try:
4
+ from enum import StrEnum
5
+ except ImportError:
6
+ from strenum import StrEnum
7
+ try:
8
+ from http import HTTPMethod
9
+ except ImportError:
10
+ class HTTPMethod(StrEnum):
11
+ GET = "GET"
12
+ POST = "POST"
13
+ PUT = "PUT"
14
+ DELETE = "DELETE"
15
+
16
+ import httpx
17
+ # noinspection PyProtectedMember
18
+ import httpx._types as types
19
+ from httpx_sse import connect_sse, ServerSentEvent, aconnect_sse
20
+ from pydantic import BaseModel
21
+
22
+ from dify_client import errors, models
23
+
24
+ _httpx_client = httpx.Client()
25
+ _async_httpx_client = httpx.AsyncClient()
26
+
27
+ IGNORED_STREAM_EVENTS = (models.StreamEvent.PING.value,)
28
+
29
+ # feedback
30
+ ENDPOINT_FEEDBACKS = "/messages/{message_id}/feedbacks"
31
+ # suggest
32
+ ENDPOINT_SUGGESTED = "/messages/{message_id}/suggested"
33
+ # files upload
34
+ ENDPOINT_FILES_UPLOAD = "/files/upload"
35
+ # completion
36
+ ENDPOINT_COMPLETION_MESSAGES = "/completion-messages"
37
+ ENDPOINT_STOP_COMPLETION_MESSAGES = "/completion-messages/{task_id}/stop"
38
+ # chat
39
+ ENDPOINT_CHAT_MESSAGES = "/chat-messages"
40
+ ENDPOINT_STOP_CHAT_MESSAGES = "/chat-messages/{task_id}/stop"
41
+ # workflow
42
+ ENDPOINT_RUN_WORKFLOWS = "/workflows/run"
43
+ ENDPOINT_STOP_WORKFLOWS = "/workflows/{task_id}/stop"
44
+ # audio <-> text
45
+ ENDPOINT_TEXT_TO_AUDIO = "/text-to-audio"
46
+ ENDPOINT_AUDIO_TO_TEXT = "/audio-to-text"
47
+
48
+
49
+ class Client(BaseModel):
50
+ api_key: str
51
+ api_base: Optional[str] = "https://api.dify.ai/v1"
52
+
53
+ def request(self, endpoint: str, method: str,
54
+ content: Optional[types.RequestContent] = None,
55
+ data: Optional[types.RequestData] = None,
56
+ files: Optional[types.RequestFiles] = None,
57
+ json: Optional[Any] = None,
58
+ params: Optional[types.QueryParamTypes] = None,
59
+ headers: Optional[Mapping[str, str]] = None,
60
+ **kwargs: object,
61
+ ) -> httpx.Response:
62
+ """
63
+ Sends a synchronous HTTP request to the specified endpoint.
64
+
65
+ Args:
66
+ endpoint: The API endpoint to send the request to.
67
+ method: The HTTP method to use (e.g., 'GET', 'POST').
68
+ content: Raw content to include in the request body.
69
+ data: Form data to include in the request body.
70
+ files: Files to include in the request body.
71
+ json: JSON data to include in the request body.
72
+ params: Query parameters to include in the request URL.
73
+ headers: Additional headers to include in the request.
74
+ **kwargs: Extra keyword arguments to pass to the request function.
75
+
76
+ Returns:
77
+ A `httpx.Response` object containing the HTTP response.
78
+
79
+ Raises:
80
+ Various DifyAPIError exceptions if the response contains an error.
81
+ """
82
+ merged_headers = {}
83
+ if headers:
84
+ merged_headers.update(headers)
85
+ self._prepare_auth_headers(merged_headers)
86
+
87
+ response = _httpx_client.request(method, endpoint, content=content, data=data, files=files, json=json,
88
+ params=params, headers=merged_headers, **kwargs)
89
+ errors.raise_for_status(response)
90
+ return response
91
+
92
+ def request_stream(self, endpoint: str, method: str,
93
+ content: Optional[types.RequestContent] = None,
94
+ data: Optional[types.RequestData] = None,
95
+ files: Optional[types.RequestFiles] = None,
96
+ json: Optional[Any] = None,
97
+ params: Optional[types.QueryParamTypes] = None,
98
+ headers: Optional[Mapping[str, str]] = None,
99
+ **kwargs,
100
+ ) -> Iterator[ServerSentEvent]:
101
+ """
102
+ Opens a server-sent events (SSE) stream to the specified endpoint.
103
+
104
+ Args:
105
+ endpoint: The API endpoint to send the request to.
106
+ method: The HTTP method to use (e.g., 'GET', 'POST').
107
+ content: Raw content to include in the request body.
108
+ data: Form data to include in the request body.
109
+ files: Files to include in the request body.
110
+ json: JSON data to include in the request body.
111
+ params: Query parameters to include in the request URL.
112
+ headers: Additional headers to include in the request.
113
+ **kwargs: Extra keyword arguments to pass to the request function.
114
+
115
+ Returns:
116
+ An iterator of `ServerSentEvent` objects representing the stream of events.
117
+
118
+ Raises:
119
+ Various DifyAPIError exceptions if an error event is received in the stream.
120
+ """
121
+ merged_headers = {}
122
+ if headers:
123
+ merged_headers.update(headers)
124
+ self._prepare_auth_headers(merged_headers)
125
+
126
+ with connect_sse(_httpx_client, method, endpoint, headers=merged_headers,
127
+ content=content, data=data, files=files, json=json, params=params, **kwargs) as event_source:
128
+ if not _check_stream_content_type(event_source.response):
129
+ event_source.response.read()
130
+ errors.raise_for_status(event_source.response)
131
+ for sse in event_source.iter_sse():
132
+ errors.raise_for_status(sse)
133
+ if sse.event in IGNORED_STREAM_EVENTS or sse.data in IGNORED_STREAM_EVENTS:
134
+ continue
135
+ yield sse
136
+
137
+ def feedback_messages(self, message_id: str, req: models.FeedbackRequest, **kwargs) -> models.FeedbackResponse:
138
+ """
139
+ Submits feedback for a specific message.
140
+
141
+ Args:
142
+ message_id: The identifier of the message to submit feedback for.
143
+ req: A `FeedbackRequest` object containing the feedback details, such as the rating.
144
+ **kwargs: Extra keyword arguments to pass to the request function.
145
+
146
+ Returns:
147
+ A `FeedbackResponse` object containing the result of the feedback submission.
148
+ """
149
+ response = self.request(
150
+ self._prepare_url(ENDPOINT_FEEDBACKS, message_id=message_id),
151
+ HTTPMethod.POST,
152
+ json=req.model_dump(),
153
+ **kwargs,
154
+ )
155
+ return models.FeedbackResponse(**response.json())
156
+
157
+ def suggest_messages(self, message_id: str, req: models.ChatSuggestRequest, **kwargs) -> models.ChatSuggestResponse:
158
+ """
159
+ Retrieves suggested messages based on a specific message.
160
+
161
+ Args:
162
+ message_id: The identifier of the message to get suggestions for.
163
+ req: A `ChatSuggestRequest` object containing the request details.
164
+ **kwargs: Extra keyword arguments to pass to the request function.
165
+
166
+ Returns:
167
+ A `ChatSuggestResponse` object containing suggested messages.
168
+ """
169
+ response = self.request(
170
+ self._prepare_url(ENDPOINT_SUGGESTED, message_id=message_id),
171
+ HTTPMethod.GET,
172
+ params=req.model_dump(),
173
+ **kwargs,
174
+ )
175
+ return models.ChatSuggestResponse(**response.json())
176
+
177
+ def upload_files(self, file: types.FileTypes, req: models.UploadFileRequest,
178
+ **kwargs) -> models.UploadFileResponse:
179
+ """
180
+ Uploads a file to be used in subsequent requests.
181
+
182
+ Args:
183
+ file: The file to upload. This can be a file-like object, or a tuple of
184
+ (`filename`, file-like object, mime_type).
185
+ req: An `UploadFileRequest` object containing the upload details, such as the user who is uploading.
186
+ **kwargs: Extra keyword arguments to pass to the request function.
187
+
188
+ Returns:
189
+ An `UploadFileResponse` object containing details about the uploaded file, such as its identifier and URL.
190
+ """
191
+ response = self.request(
192
+ self._prepare_url(ENDPOINT_FILES_UPLOAD),
193
+ HTTPMethod.POST,
194
+ data=req.model_dump(),
195
+ files=[("file", file)],
196
+ **kwargs,
197
+ )
198
+ return models.UploadFileResponse(**response.json())
199
+
200
+ def completion_messages(self, req: models.CompletionRequest, **kwargs) \
201
+ -> Union[models.CompletionResponse, Iterator[models.CompletionStreamResponse]]:
202
+ """
203
+ Sends a request to generate a completion or a series of completions based on the provided input.
204
+
205
+ Returns:
206
+ If the response mode is blocking, it returns a `CompletionResponse` object containing the generated message.
207
+ If the response mode is streaming, it returns an iterator of `CompletionStreamResponse` objects containing
208
+ the stream of generated events.
209
+ """
210
+ if req.response_mode == models.ResponseMode.BLOCKING:
211
+ return self._completion_messages(req, **kwargs)
212
+ if req.response_mode == models.ResponseMode.STREAMING:
213
+ return self._completion_messages_stream(req, **kwargs)
214
+ raise ValueError(f"Invalid request_mode: {req.response_mode}")
215
+
216
+ def _completion_messages(self, req: models.CompletionRequest, **kwargs) -> models.CompletionResponse:
217
+ response = self.request(
218
+ self._prepare_url(ENDPOINT_COMPLETION_MESSAGES),
219
+ HTTPMethod.POST,
220
+ json=req.model_dump(),
221
+ **kwargs,
222
+ )
223
+ return models.CompletionResponse(**response.json())
224
+
225
+ def _completion_messages_stream(self, req: models.CompletionRequest, **kwargs) \
226
+ -> Iterator[models.CompletionStreamResponse]:
227
+ event_source = self.request_stream(
228
+ self._prepare_url(ENDPOINT_COMPLETION_MESSAGES),
229
+ HTTPMethod.POST,
230
+ json=req.model_dump(),
231
+ **kwargs,
232
+ )
233
+ for sse in event_source:
234
+ yield models.build_completion_stream_response(sse.json())
235
+
236
+ def stop_completion_messages(self, task_id: str, req: models.StopRequest, **kwargs) -> models.StopResponse:
237
+ """
238
+ Sends a request to stop a streaming completion task.
239
+
240
+ Returns:
241
+ A `StopResponse` object indicating the success of the operation.
242
+ """
243
+ return self._stop_stream(self._prepare_url(ENDPOINT_STOP_COMPLETION_MESSAGES, task_id=task_id), req, **kwargs)
244
+
245
+ def chat_messages(self, req: models.ChatRequest, **kwargs) \
246
+ -> Union[models.ChatResponse, Iterator[models.ChatStreamResponse]]:
247
+ """
248
+ Sends a request to generate a chat message or a series of chat messages based on the provided input.
249
+
250
+ Returns:
251
+ If the response mode is blocking, it returns a `ChatResponse` object containing the generated chat message.
252
+ If the response mode is streaming, it returns an iterator of `ChatStreamResponse` objects containing the
253
+ stream of chat events.
254
+ """
255
+ if req.response_mode == models.ResponseMode.BLOCKING:
256
+ return self._chat_messages(req, **kwargs)
257
+ if req.response_mode == models.ResponseMode.STREAMING:
258
+ return self._chat_messages_stream(req, **kwargs)
259
+ raise ValueError(f"Invalid request_mode: {req.response_mode}")
260
+
261
+ def _chat_messages(self, req: models.ChatRequest, **kwargs) -> models.ChatResponse:
262
+ response = self.request(
263
+ self._prepare_url(ENDPOINT_CHAT_MESSAGES),
264
+ HTTPMethod.POST,
265
+ json=req.model_dump(),
266
+ **kwargs,
267
+ )
268
+ return models.ChatResponse(**response.json())
269
+
270
+ def _chat_messages_stream(self, req: models.ChatRequest, **kwargs) -> Iterator[models.ChatStreamResponse]:
271
+ event_source = self.request_stream(
272
+ self._prepare_url(ENDPOINT_CHAT_MESSAGES),
273
+ HTTPMethod.POST,
274
+ json=req.model_dump(),
275
+ **kwargs,
276
+ )
277
+ for sse in event_source:
278
+ yield models.build_chat_stream_response(sse.json())
279
+
280
+ def stop_chat_messages(self, task_id: str, req: models.StopRequest, **kwargs) -> models.StopResponse:
281
+ """
282
+ Sends a request to stop a streaming chat task.
283
+
284
+ Returns:
285
+ A `StopResponse` object indicating the success of the operation.
286
+ """
287
+ return self._stop_stream(self._prepare_url(ENDPOINT_STOP_CHAT_MESSAGES, task_id=task_id), req, **kwargs)
288
+
289
+ def run_workflows(self, req: models.WorkflowsRunRequest, **kwargs) \
290
+ -> Union[models.WorkflowsRunResponse, Iterator[models.WorkflowsRunStreamResponse]]:
291
+ """
292
+ Initiates the execution of a workflow, which can consist of multiple steps and actions.
293
+
294
+ Returns:
295
+ If the response mode is blocking, it returns a `WorkflowsRunResponse` object containing the results of the
296
+ completed workflow.
297
+ If the response mode is streaming, it returns an iterator of `WorkflowsRunStreamResponse` objects
298
+ containing the stream of workflow events.
299
+ """
300
+ if req.response_mode == models.ResponseMode.BLOCKING:
301
+ return self._run_workflows(req, **kwargs)
302
+ if req.response_mode == models.ResponseMode.STREAMING:
303
+ return self._run_workflows_stream(req, **kwargs)
304
+ raise ValueError(f"Invalid request_mode: {req.response_mode}")
305
+
306
+ def _run_workflows(self, req: models.WorkflowsRunRequest, **kwargs) -> models.WorkflowsRunResponse:
307
+ response = self.request(
308
+ self._prepare_url(ENDPOINT_RUN_WORKFLOWS),
309
+ HTTPMethod.POST,
310
+ json=req.model_dump(),
311
+ **kwargs,
312
+ )
313
+ return models.WorkflowsRunResponse(**response.json())
314
+
315
+ def _run_workflows_stream(self, req: models.WorkflowsRunRequest, **kwargs) \
316
+ -> Iterator[models.WorkflowsRunStreamResponse]:
317
+ event_source = self.request_stream(
318
+ self._prepare_url(ENDPOINT_RUN_WORKFLOWS),
319
+ HTTPMethod.POST,
320
+ json=req.model_dump(),
321
+ **kwargs,
322
+ )
323
+ for sse in event_source:
324
+ yield models.build_workflows_stream_response(sse.json())
325
+
326
+ def stop_workflows(self, task_id: str, req: models.StopRequest, **kwargs) -> models.StopResponse:
327
+ """
328
+ Sends a request to stop a streaming workflow task.
329
+
330
+ Returns:
331
+ A `StopResponse` object indicating the success of the operation.
332
+ """
333
+ return self._stop_stream(self._prepare_url(ENDPOINT_STOP_WORKFLOWS, task_id=task_id), req, **kwargs)
334
+
335
+ def _stop_stream(self, endpoint: str, req: models.StopRequest, **kwargs) -> models.StopResponse:
336
+ response = self.request(
337
+ endpoint,
338
+ HTTPMethod.POST,
339
+ json=req.model_dump(),
340
+ **kwargs,
341
+ )
342
+ return models.StopResponse(**response.json())
343
+
344
+ def _prepare_url(self, endpoint: str, **kwargs) -> str:
345
+ return self.api_base + endpoint.format(**kwargs)
346
+
347
+ def _prepare_auth_headers(self, headers: Dict[str, str]):
348
+ if "authorization" not in (key.lower() for key in headers.keys()):
349
+ headers["Authorization"] = f"Bearer {self.api_key}"
350
+
351
+
352
+ class AsyncClient(BaseModel):
353
+ api_key: str
354
+ api_base: Optional[str] = "https://api.dify.ai/v1"
355
+
356
+ async def arequest(self, endpoint: str, method: str,
357
+ content: Optional[types.RequestContent] = None,
358
+ data: Optional[types.RequestData] = None,
359
+ files: Optional[types.RequestFiles] = None,
360
+ json: Optional[Any] = None,
361
+ params: Optional[types.QueryParamTypes] = None,
362
+ headers: Optional[Mapping[str, str]] = None,
363
+ **kwargs,
364
+ ) -> httpx.Response:
365
+ """
366
+ Asynchronously sends a request to the specified Dify API endpoint.
367
+
368
+ Args:
369
+ endpoint: The endpoint URL to which the request is sent.
370
+ method: The HTTP method to be used for the request (e.g., 'GET', 'POST').
371
+ content: Raw content to include in the request body, if any.
372
+ data: Form data to be sent in the request body.
373
+ files: Files to be uploaded with the request.
374
+ json: JSON data to be sent in the request body.
375
+ params: Query parameters to be included in the request URL.
376
+ headers: Additional headers to be sent with the request.
377
+ **kwargs: Extra keyword arguments to be passed to the underlying HTTPX request function.
378
+
379
+ Returns:
380
+ A httpx.Response object containing the server's response to the HTTP request.
381
+
382
+ Raises:
383
+ Various DifyAPIError exceptions if the response contains an error.
384
+ """
385
+ merged_headers = {}
386
+ if headers:
387
+ merged_headers.update(headers)
388
+ self._prepare_auth_headers(merged_headers)
389
+
390
+ response = await _async_httpx_client.request(method, endpoint, content=content, data=data, files=files,
391
+ json=json, params=params, headers=merged_headers, **kwargs)
392
+ errors.raise_for_status(response)
393
+ return response
394
+
395
+ async def arequest_stream(self, endpoint: str, method: str,
396
+ content: Optional[types.RequestContent] = None,
397
+ data: Optional[types.RequestData] = None,
398
+ files: Optional[types.RequestFiles] = None,
399
+ json: Optional[Any] = None,
400
+ params: Optional[types.QueryParamTypes] = None,
401
+ headers: Optional[Mapping[str, str]] = None,
402
+ **kwargs,
403
+ ) -> AsyncIterator[ServerSentEvent]:
404
+ """
405
+ Asynchronously establishes a streaming connection to the specified Dify API endpoint.
406
+
407
+ Args:
408
+ endpoint: The endpoint URL to which the request is sent.
409
+ method: The HTTP method to be used for the request (e.g., 'GET', 'POST').
410
+ content: Raw content to include in the request body, if any.
411
+ data: Form data to be sent in the request body.
412
+ files: Files to be uploaded with the request.
413
+ json: JSON data to be sent in the request body.
414
+ params: Query parameters to be included in the request URL.
415
+ headers: Additional headers to be sent with the request.
416
+ **kwargs: Extra keyword arguments to be passed to the underlying HTTPX request function.
417
+
418
+ Yields:
419
+ ServerSentEvent objects representing the events received from the server.
420
+
421
+ Raises:
422
+ Various DifyAPIError exceptions if an error event is received in the stream.
423
+ """
424
+ merged_headers = {}
425
+ if headers:
426
+ merged_headers.update(headers)
427
+ self._prepare_auth_headers(merged_headers)
428
+
429
+ async with aconnect_sse(_async_httpx_client, method, endpoint, headers=merged_headers,
430
+ content=content, data=data, files=files, json=json, params=params,
431
+ **kwargs) as event_source:
432
+ if not _check_stream_content_type(event_source.response):
433
+ await event_source.response.aread()
434
+ errors.raise_for_status(event_source.response)
435
+ async for sse in event_source.aiter_sse():
436
+ errors.raise_for_status(sse)
437
+ if sse.event in IGNORED_STREAM_EVENTS or sse.data in IGNORED_STREAM_EVENTS:
438
+ continue
439
+ yield sse
440
+
441
+ async def afeedback_messages(self, message_id: str, req: models.FeedbackRequest, **kwargs) \
442
+ -> models.FeedbackResponse:
443
+ """
444
+ Submits feedback for a specific message.
445
+
446
+ Args:
447
+ message_id: The identifier of the message to submit feedback for.
448
+ req: A `FeedbackRequest` object containing the feedback details, such as the rating.
449
+ **kwargs: Extra keyword arguments to pass to the request function.
450
+
451
+ Returns:
452
+ A `FeedbackResponse` object containing the result of the feedback submission.
453
+ """
454
+ response = await self.arequest(
455
+ self._prepare_url(ENDPOINT_FEEDBACKS, message_id=message_id),
456
+ HTTPMethod.POST,
457
+ json=req.model_dump(),
458
+ **kwargs,
459
+ )
460
+ return models.FeedbackResponse(**response.json())
461
+
462
+ async def asuggest_messages(self, message_id: str, req: models.ChatSuggestRequest, **kwargs) \
463
+ -> models.ChatSuggestResponse:
464
+ """
465
+ Retrieves suggested messages based on a specific message.
466
+
467
+ Args:
468
+ message_id: The identifier of the message to get suggestions for.
469
+ req: A `ChatSuggestRequest` object containing the request details.
470
+ **kwargs: Extra keyword arguments to pass to the request function.
471
+
472
+ Returns:
473
+ A `ChatSuggestResponse` object containing suggested messages.
474
+ """
475
+ response = await self.arequest(
476
+ self._prepare_url(ENDPOINT_SUGGESTED, message_id=message_id),
477
+ HTTPMethod.GET,
478
+ params=req.model_dump(),
479
+ **kwargs,
480
+ )
481
+ return models.ChatSuggestResponse(**response.json())
482
+
483
+ async def aupload_files(self, file: types.FileTypes, req: models.UploadFileRequest, **kwargs) \
484
+ -> models.UploadFileResponse:
485
+ """
486
+ Uploads a file to be used in subsequent requests.
487
+
488
+ Args:
489
+ file: The file to upload. This can be a file-like object, or a tuple of
490
+ (`filename`, file-like object, mime_type).
491
+ req: An `UploadFileRequest` object containing the upload details, such as the user who is uploading.
492
+ **kwargs: Extra keyword arguments to pass to the request function.
493
+
494
+ Returns:
495
+ An `UploadFileResponse` object containing details about the uploaded file, such as its identifier and URL.
496
+ """
497
+ response = await self.arequest(
498
+ self._prepare_url(ENDPOINT_FILES_UPLOAD),
499
+ HTTPMethod.POST,
500
+ data=req.model_dump(),
501
+ files=[("file", file)],
502
+ **kwargs,
503
+ )
504
+ return models.UploadFileResponse(**response.json())
505
+
506
+ async def acompletion_messages(self, req: models.CompletionRequest, **kwargs) \
507
+ -> Union[models.CompletionResponse, AsyncIterator[models.CompletionStreamResponse]]:
508
+ """
509
+ Sends a request to generate a completion or a series of completions based on the provided input.
510
+
511
+ Returns:
512
+ If the response mode is blocking, it returns a `CompletionResponse` object containing the generated message.
513
+ If the response mode is streaming, it returns an iterator of `CompletionStreamResponse` objects containing
514
+ the stream of generated events.
515
+ """
516
+ if req.response_mode == models.ResponseMode.BLOCKING:
517
+ return await self._acompletion_messages(req, **kwargs)
518
+ if req.response_mode == models.ResponseMode.STREAMING:
519
+ return self._acompletion_messages_stream(req, **kwargs)
520
+ raise ValueError(f"Invalid request_mode: {req.response_mode}")
521
+
522
+ async def _acompletion_messages(self, req: models.CompletionRequest, **kwargs) -> models.CompletionResponse:
523
+ response = await self.arequest(
524
+ self._prepare_url(ENDPOINT_COMPLETION_MESSAGES),
525
+ HTTPMethod.POST,
526
+ json=req.model_dump(),
527
+ **kwargs,
528
+ )
529
+ return models.CompletionResponse(**response.json())
530
+
531
+ async def _acompletion_messages_stream(self, req: models.CompletionRequest, **kwargs) \
532
+ -> AsyncIterator[models.CompletionStreamResponse]:
533
+ async for sse in self.arequest_stream(
534
+ self._prepare_url(ENDPOINT_COMPLETION_MESSAGES),
535
+ HTTPMethod.POST,
536
+ json=req.model_dump(),
537
+ **kwargs):
538
+ yield models.build_completion_stream_response(sse.json())
539
+
540
+ async def astop_completion_messages(self, task_id: str, req: models.StopRequest, **kwargs) -> models.StopResponse:
541
+ """
542
+ Sends a request to stop a streaming completion task.
543
+
544
+ Returns:
545
+ A `StopResponse` object indicating the success of the operation.
546
+ """
547
+ return await self._astop_stream(
548
+ self._prepare_url(ENDPOINT_STOP_COMPLETION_MESSAGES, task_id=task_id), req, **kwargs)
549
+
550
+ async def achat_messages(self, req: models.ChatRequest, **kwargs) \
551
+ -> Union[models.ChatResponse, AsyncIterator[models.ChatStreamResponse]]:
552
+ """
553
+ Sends a request to generate a chat message or a series of chat messages based on the provided input.
554
+
555
+ Returns:
556
+ If the response mode is blocking, it returns a `ChatResponse` object containing the generated chat message.
557
+ If the response mode is streaming, it returns an iterator of `ChatStreamResponse` objects containing the
558
+ stream of chat events.
559
+ """
560
+ if req.response_mode == models.ResponseMode.BLOCKING:
561
+ return await self._achat_messages(req, **kwargs)
562
+ if req.response_mode == models.ResponseMode.STREAMING:
563
+ return self._achat_messages_stream(req, **kwargs)
564
+ raise ValueError(f"Invalid request_mode: {req.response_mode}")
565
+
566
+ async def _achat_messages(self, req: models.ChatRequest, **kwargs) -> models.ChatResponse:
567
+ response = await self.arequest(
568
+ self._prepare_url(ENDPOINT_CHAT_MESSAGES),
569
+ HTTPMethod.POST,
570
+ json=req.model_dump(),
571
+ **kwargs,
572
+ )
573
+ return models.ChatResponse(**response.json())
574
+
575
+ async def _achat_messages_stream(self, req: models.ChatRequest, **kwargs) \
576
+ -> AsyncIterator[models.ChatStreamResponse]:
577
+ async for sse in self.arequest_stream(
578
+ self._prepare_url(ENDPOINT_CHAT_MESSAGES),
579
+ HTTPMethod.POST,
580
+ json=req.model_dump(),
581
+ **kwargs):
582
+ yield models.build_chat_stream_response(sse.json())
583
+
584
+ async def astop_chat_messages(self, task_id: str, req: models.StopRequest, **kwargs) -> models.StopResponse:
585
+ """
586
+ Sends a request to stop a streaming chat task.
587
+
588
+ Returns:
589
+ A `StopResponse` object indicating the success of the operation.
590
+ """
591
+ return await self._astop_stream(self._prepare_url(ENDPOINT_STOP_CHAT_MESSAGES, task_id=task_id), req, **kwargs)
592
+
593
+ async def arun_workflows(self, req: models.WorkflowsRunRequest, **kwargs) \
594
+ -> Union[models.WorkflowsRunResponse, AsyncIterator[models.WorkflowsRunStreamResponse]]:
595
+ """
596
+ Initiates the execution of a workflow, which can consist of multiple steps and actions.
597
+
598
+ Returns:
599
+ If the response mode is blocking, it returns a `WorkflowsRunResponse` object containing the results of the
600
+ completed workflow.
601
+ If the response mode is streaming, it returns an iterator of `WorkflowsRunStreamResponse` objects
602
+ containing the stream of workflow events.
603
+ """
604
+ if req.response_mode == models.ResponseMode.BLOCKING:
605
+ return await self._arun_workflows(req, **kwargs)
606
+ if req.response_mode == models.ResponseMode.STREAMING:
607
+ return self._arun_workflows_stream(req, **kwargs)
608
+ raise ValueError(f"Invalid request_mode: {req.response_mode}")
609
+
610
+ async def _arun_workflows(self, req: models.WorkflowsRunRequest, **kwargs) -> models.WorkflowsRunResponse:
611
+ response = await self.arequest(
612
+ self._prepare_url(ENDPOINT_RUN_WORKFLOWS),
613
+ HTTPMethod.POST,
614
+ json=req.model_dump(),
615
+ **kwargs,
616
+ )
617
+ return models.WorkflowsRunResponse(**response.json())
618
+
619
+ async def _arun_workflows_stream(self, req: models.WorkflowsRunRequest, **kwargs) \
620
+ -> AsyncIterator[models.WorkflowsRunStreamResponse]:
621
+ async for sse in self.arequest_stream(
622
+ self._prepare_url(ENDPOINT_RUN_WORKFLOWS),
623
+ HTTPMethod.POST,
624
+ json=req.model_dump(),
625
+ **kwargs):
626
+ yield models.build_workflows_stream_response(sse.json())
627
+
628
+ async def astop_workflows(self, task_id: str, req: models.StopRequest, **kwargs) -> models.StopResponse:
629
+ """
630
+ Sends a request to stop a streaming workflow task.
631
+
632
+ Returns:
633
+ A `StopResponse` object indicating the success of the operation.
634
+ """
635
+ return await self._astop_stream(self._prepare_url(ENDPOINT_STOP_WORKFLOWS, task_id=task_id), req, **kwargs)
636
+
637
+ async def _astop_stream(self, endpoint: str, req: models.StopRequest, **kwargs) -> models.StopResponse:
638
+ response = await self.arequest(
639
+ endpoint,
640
+ HTTPMethod.POST,
641
+ json=req.model_dump(),
642
+ **kwargs,
643
+ )
644
+ return models.StopResponse(**response.json())
645
+
646
+ def _prepare_url(self, endpoint: str, **kwargs) -> str:
647
+ return self.api_base + endpoint.format(**kwargs)
648
+
649
+ def _prepare_auth_headers(self, headers: Dict[str, str]):
650
+ if "authorization" not in (key.lower() for key in headers.keys()):
651
+ headers["Authorization"] = f"Bearer {self.api_key}"
652
+
653
+
654
+ def _get_content_type(headers: httpx.Headers) -> str:
655
+ return headers.get("content-type", "").partition(";")[0]
656
+
657
+
658
+ def _check_stream_content_type(response: httpx.Response) -> bool:
659
+ content_type = _get_content_type(response.headers)
660
+ return response.is_success and "text/event-stream" in content_type
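Taken together, `AsyncClient` exposes blocking and streaming variants behind a single method per endpoint. A minimal usage sketch, assuming the package is importable as `dify_client` and using placeholder credentials:

import asyncio

from dify_client import AsyncClient, models

client = AsyncClient(api_key="your-api-key")  # placeholder key

async def main():
    req = models.ChatRequest(
        query="Hello!",
        response_mode=models.ResponseMode.STREAMING,
        user="user-123",  # any stable end-user identifier
    )
    # In streaming mode the awaited call returns an async iterator of events.
    async for event in await client.achat_messages(req):
        if isinstance(event, models.MessageStreamResponse):
            print(event.answer, end="")

asyncio.run(main())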
dify_client_python/build/lib/dify_client/errors.py ADDED
@@ -0,0 +1,132 @@
1
+ from http import HTTPStatus
2
+ from typing import Union
3
+
4
+ import httpx
5
+ import httpx_sse
6
+
7
+ from dify_client import models
8
+
9
+
10
+ class DifyAPIError(Exception):
11
+ def __init__(self, status: int, code: str, message: str):
12
+ super().__init__(f"status_code={status}, code={code}, {message}")
13
+ self.status = status
14
+ self.code = code
15
+ self.message = message
16
+
17
+
18
+ class DifyInvalidParam(DifyAPIError):
19
+ pass
20
+
21
+
22
+ class DifyNotChatApp(DifyAPIError):
23
+ pass
24
+
25
+
26
+ class DifyResourceNotFound(DifyAPIError):
27
+ pass
28
+
29
+
30
+ class DifyAppUnavailable(DifyAPIError):
31
+ pass
32
+
33
+
34
+ class DifyProviderNotInitialize(DifyAPIError):
35
+ pass
36
+
37
+
38
+ class DifyProviderQuotaExceeded(DifyAPIError):
39
+ pass
40
+
41
+
42
+ class DifyModelCurrentlyNotSupport(DifyAPIError):
43
+ pass
44
+
45
+
46
+ class DifyCompletionRequestError(DifyAPIError):
47
+ pass
48
+
49
+
50
+ class DifyInternalServerError(DifyAPIError):
51
+ pass
52
+
53
+
54
+ class DifyNoFileUploaded(DifyAPIError):
55
+ pass
56
+
57
+
58
+ class DifyTooManyFiles(DifyAPIError):
59
+ pass
60
+
61
+
62
+ class DifyUnsupportedPreview(DifyAPIError):
63
+ pass
64
+
65
+
66
+ class DifyUnsupportedEstimate(DifyAPIError):
67
+ pass
68
+
69
+
70
+ class DifyFileTooLarge(DifyAPIError):
71
+ pass
72
+
73
+
74
+ class DifyUnsupportedFileType(DifyAPIError):
75
+ pass
76
+
77
+
78
+ class DifyS3ConnectionFailed(DifyAPIError):
79
+ pass
80
+
81
+
82
+ class DifyS3PermissionDenied(DifyAPIError):
83
+ pass
84
+
85
+
86
+ class DifyS3FileTooLarge(DifyAPIError):
87
+ pass
88
+
89
+
90
+ SPEC_CODE_ERRORS = {
91
+ # completion & chat & workflow
92
+ "invalid_param": DifyInvalidParam,
93
+ "not_chat_app": DifyNotChatApp,
94
+ "app_unavailable": DifyAppUnavailable,
95
+ "provider_not_initialize": DifyProviderNotInitialize,
96
+ "provider_quota_exceeded": DifyProviderQuotaExceeded,
97
+ "model_currently_not_support": DifyModelCurrentlyNotSupport,
98
+ "completion_request_error": DifyCompletionRequestError,
99
+ # files upload
100
+ "no_file_uploaded": DifyNoFileUploaded,
101
+ "too_many_files": DifyTooManyFiles,
102
+ "unsupported_preview": DifyUnsupportedPreview,
103
+ "unsupported_estimate": DifyUnsupportedEstimate,
104
+ "file_too_large": DifyFileTooLarge,
105
+ "unsupported_file_type": DifyUnsupportedFileType,
106
+ "s3_connection_failed": DifyS3ConnectionFailed,
107
+ "s3_permission_denied": DifyS3PermissionDenied,
108
+ "s3_file_too_large": DifyS3FileTooLarge,
109
+ }
110
+
111
+
112
+ def raise_for_status(response: Union[httpx.Response, httpx_sse.ServerSentEvent]):
113
+ if isinstance(response, httpx.Response):
114
+ if response.is_success:
115
+ return
116
+ json = response.json()
117
+ if "status" not in json:
118
+ json["status"] = response.status_code
119
+ details = models.ErrorResponse(**json)
120
+ elif isinstance(response, httpx_sse.ServerSentEvent):
121
+ if response.event != models.StreamEvent.ERROR.value:
122
+ return
123
+ details = models.ErrorStreamResponse(**response.json())
124
+ else:
125
+ raise ValueError(f"Invalid dify response type: {type(response)}")
126
+
127
+ if details.status == HTTPStatus.NOT_FOUND:
128
+ raise DifyResourceNotFound(details.status, details.code, details.message)
129
+ elif details.status == HTTPStatus.INTERNAL_SERVER_ERROR:
130
+ raise DifyInternalServerError(details.status, details.code, details.message)
131
+ else:
132
+ raise SPEC_CODE_ERRORS.get(details.code, DifyAPIError)(details.status, details.code, details.message)
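Because `raise_for_status` maps Dify's error codes onto this exception hierarchy, callers can catch a specific subclass or fall back to the common base. A hedged sketch (request values are placeholders):

from dify_client import Client, errors, models

client = Client(api_key="your-api-key")

try:
    resp = client.chat_messages(models.ChatRequest(
        query="Hi",
        response_mode=models.ResponseMode.BLOCKING,
        user="user-123",
    ))
except errors.DifyProviderQuotaExceeded as e:
    # Raised when the server reports code "provider_quota_exceeded".
    print("quota exhausted:", e.message)
except errors.DifyAPIError as e:
    # The base class catches every other mapped (or unmapped) error code.
    print(e.status, e.code, e.message)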
dify_client_python/build/lib/dify_client/models/__init__.py ADDED
@@ -0,0 +1,7 @@
1
+ from .chat import *
2
+ from .completion import *
3
+ from .feedback import *
4
+ from .file import *
5
+ from .workflow import *
6
+ from .stream import *
7
+ from .base import StopRequest, StopResponse
dify_client_python/build/lib/dify_client/models/base.py ADDED
@@ -0,0 +1,93 @@
1
+ try:
2
+ from enum import StrEnum
3
+ except ImportError:
4
+ from strenum import StrEnum
5
+ from http import HTTPStatus
6
+ from typing import Optional, List
7
+
8
+ from pydantic import BaseModel, ConfigDict
9
+
10
+
11
+ class Mode(StrEnum):
12
+ CHAT = "chat"
13
+ COMPLETION = "completion"
14
+
15
+
16
+ class ResponseMode(StrEnum):
17
+ STREAMING = 'streaming'
18
+ BLOCKING = 'blocking'
19
+
20
+
21
+ class FileType(StrEnum):
22
+ IMAGE = "image"
23
+
24
+
25
+ class TransferMethod(StrEnum):
26
+ REMOTE_URL = "remote_url"
27
+ LOCAL_FILE = "local_file"
28
+
29
+
30
+ # Allows the entry of various variable values defined by the App.
31
+ # The inputs parameter contains multiple key/value pairs, with each key corresponding to a specific variable and
32
+ # each value being the specific value for that variable.
33
+ # The text generation application requires at least one key/value pair to be inputted.
34
+ class CompletionInputs(BaseModel):
35
+ model_config = ConfigDict(extra='allow')
36
+ # Required. The input text to be processed.
37
+ query: str
38
+
39
+
40
+ class File(BaseModel):
41
+ type: FileType
42
+ transfer_method: TransferMethod
43
+ url: Optional[str]
44
+ # Uploaded file ID, which must be obtained by uploading through the File Upload API in advance
45
+ # (when the transfer method is local_file)
46
+ upload_file_id: Optional[str]
47
+
48
+
49
+ class Usage(BaseModel):
50
+ prompt_tokens: int
51
+ completion_tokens: int
52
+ total_tokens: int
53
+
54
+ prompt_unit_price: str
55
+ prompt_price_unit: str
56
+ prompt_price: str
57
+ completion_unit_price: str
58
+ completion_price_unit: str
59
+ completion_price: str
60
+ total_price: str
61
+ currency: str
62
+
63
+ latency: float
64
+
65
+
66
+ class RetrieverResource(BaseModel):
67
+ position: int
68
+ dataset_id: str
69
+ dataset_name: str
70
+ document_id: str
71
+ document_name: str
72
+ segment_id: str
73
+ score: float
74
+ content: str
75
+
76
+
77
+ class Metadata(BaseModel):
78
+ usage: Usage
79
+ retriever_resources: List[RetrieverResource] = []
80
+
81
+
82
+ class StopRequest(BaseModel):
83
+ user: str
84
+
85
+
86
+ class StopResponse(BaseModel):
87
+ result: str # success
88
+
89
+
90
+ class ErrorResponse(BaseModel):
91
+ status: int = HTTPStatus.INTERNAL_SERVER_ERROR # HTTP status code
92
+ code: str = ""
93
+ message: str = ""
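Note that `CompletionInputs` sets `extra='allow'`, so App-defined variables ride along as arbitrary extra fields next to the required `query`. A small sketch (the variable names are illustrative, not part of the API):

from dify_client.models.base import CompletionInputs

inputs = CompletionInputs(query="Summarize this text", tone="formal", max_words=100)
print(inputs.model_dump())
# {'query': 'Summarize this text', 'tone': 'formal', 'max_words': 100}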
dify_client_python/build/lib/dify_client/models/chat.py ADDED
@@ -0,0 +1,29 @@
1
+ from typing import Dict, List, Optional, Any
2
+
3
+ from pydantic import BaseModel, Field
4
+
5
+ from dify_client.models.base import ResponseMode, File
6
+ from dify_client.models.completion import CompletionResponse
7
+
8
+
9
+ class ChatRequest(BaseModel):
10
+ query: str
11
+ inputs: Dict[str, Any] = Field(default_factory=dict)
12
+ response_mode: ResponseMode
13
+ user: str
14
+ conversation_id: Optional[str] = ""
15
+ files: List[File] = []
16
+ auto_generate_name: bool = True
17
+
18
+
19
+ class ChatResponse(CompletionResponse):
20
+ pass
21
+
22
+
23
+ class ChatSuggestRequest(BaseModel):
24
+ user: str
25
+
26
+
27
+ class ChatSuggestResponse(BaseModel):
28
+ result: str
29
+ data: List[str] = []
dify_client_python/build/lib/dify_client/models/completion.py ADDED
@@ -0,0 +1,22 @@
1
+ from typing import Optional, List
2
+
3
+ from pydantic import BaseModel
4
+
5
+ from dify_client.models.base import CompletionInputs, ResponseMode, File, Metadata, Mode
6
+
7
+
8
+ class CompletionRequest(BaseModel):
9
+ inputs: CompletionInputs
10
+ response_mode: ResponseMode
11
+ user: str
12
+ conversation_id: Optional[str] = ""
13
+ files: List[File] = []
14
+
15
+
16
+ class CompletionResponse(BaseModel):
17
+ message_id: str
18
+ conversation_id: Optional[str] = ""
19
+ mode: Mode
20
+ answer: str
21
+ metadata: Metadata
22
+ created_at: int # unix timestamp seconds
dify_client_python/build/lib/dify_client/models/feedback.py ADDED
@@ -0,0 +1,21 @@
1
+ try:
2
+ from enum import StrEnum
3
+ except ImportError:
4
+ from strenum import StrEnum
5
+ from typing import Optional
6
+
7
+ from pydantic import BaseModel
8
+
9
+
10
+ class Rating(StrEnum):
11
+ LIKE = "like"
12
+ DISLIKE = "dislike"
13
+
14
+
15
+ class FeedbackRequest(BaseModel):
16
+ rating: Optional[Rating] = None
17
+ user: str
18
+
19
+
20
+ class FeedbackResponse(BaseModel):
21
+ result: str # success
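These models back the feedback endpoint shown earlier in `_clientx.py`. A minimal sketch with placeholder identifiers:

from dify_client import Client, models

client = Client(api_key="your-api-key")

resp = client.feedback_messages(
    message_id="message-uuid",  # id returned by a prior chat/completion call
    req=models.FeedbackRequest(rating=models.Rating.LIKE, user="user-123"),
)
assert resp.result == "success"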
dify_client_python/build/lib/dify_client/models/file.py ADDED
@@ -0,0 +1,15 @@
1
+ from pydantic import BaseModel
2
+
3
+
4
+ class UploadFileRequest(BaseModel):
5
+ user: str
6
+
7
+
8
+ class UploadFileResponse(BaseModel):
9
+ id: str
10
+ name: str
11
+ size: int
12
+ extension: str
13
+ mime_type: str
14
+ created_by: str # created by user
15
+ created_at: int # unix timestamp seconds
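These two models pair with `Client.upload_files` / `AsyncClient.aupload_files`. A sketch, assuming a local image and a placeholder user id:

from dify_client import Client, models

client = Client(api_key="your-api-key")

with open("photo.png", "rb") as f:
    uploaded = client.upload_files(
        file=("photo.png", f, "image/png"),  # (filename, file-like object, mime_type)
        req=models.UploadFileRequest(user="user-123"),
    )
print(uploaded.id, uploaded.mime_type, uploaded.size)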
dify_client_python/build/lib/dify_client/models/stream.py ADDED
@@ -0,0 +1,186 @@
1
+ try:
2
+ from enum import StrEnum
3
+ except ImportError:
4
+ from strenum import StrEnum
5
+ from typing import Union, Optional, List
6
+
7
+ from pydantic import BaseModel, ConfigDict, field_validator
8
+
9
+ from dify_client import utils
10
+ from dify_client.models.base import Metadata, ErrorResponse
11
+ from dify_client.models.workflow import WorkflowStartedData, WorkflowFinishedData, NodeStartedData, NodeFinishedData
12
+
13
+ STREAM_EVENT_KEY = "event"
14
+
15
+
16
+ class StreamEvent(StrEnum):
17
+ MESSAGE = "message"
18
+ AGENT_MESSAGE = "agent_message"
19
+ AGENT_THOUGHT = "agent_thought"
20
+ MESSAGE_FILE = "message_file" # need to show file
21
+ WORKFLOW_STARTED = "workflow_started"
22
+ NODE_STARTED = "node_started"
23
+ NODE_FINISHED = "node_finished"
24
+ WORKFLOW_FINISHED = "workflow_finished"
25
+ MESSAGE_END = "message_end"
26
+ MESSAGE_REPLACE = "message_replace"
27
+ ERROR = "error"
28
+ PING = "ping"
29
+
30
+ @classmethod
31
+ def new(cls, event: Union["StreamEvent", str]) -> "StreamEvent":
32
+ if isinstance(event, cls):
33
+ return event
34
+ return utils.str_to_enum(cls, event)
35
+
36
+
37
+ class StreamResponse(BaseModel):
38
+ model_config = ConfigDict(extra='allow')
39
+
40
+ event: StreamEvent
41
+ task_id: Optional[str] = ""
42
+
43
+ @field_validator("event", mode="before")
44
+ def transform_stream_event(cls, event: Union[StreamEvent, str]) -> StreamEvent:
45
+ return StreamEvent.new(event)
46
+
47
+
48
+ class PingResponse(StreamResponse):
49
+ pass
50
+
51
+
52
+ class ErrorStreamResponse(StreamResponse, ErrorResponse):
53
+ message_id: Optional[str] = ""
54
+
55
+
56
+ class MessageStreamResponse(StreamResponse):
57
+ message_id: str
58
+ conversation_id: Optional[str] = ""
59
+ answer: str
60
+ created_at: int # unix timestamp seconds
61
+
62
+
63
+ class MessageEndStreamResponse(StreamResponse):
64
+ message_id: str
65
+ conversation_id: Optional[str] = ""
66
+ created_at: int # unix timestamp seconds
67
+ metadata: Optional[Metadata]
68
+
69
+
70
+ class MessageReplaceStreamResponse(MessageStreamResponse):
71
+ pass
72
+
73
+
74
+ class AgentMessageStreamResponse(MessageStreamResponse):
75
+ pass
76
+
77
+
78
+ class AgentThoughtStreamResponse(StreamResponse):
79
+ id: str # agent thought id
80
+ message_id: str
81
+ conversation_id: str
82
+ position: int # thought position, start from 1
83
+ thought: str
84
+ observation: str
85
+ tool: str
86
+ tool_input: str
87
+ message_files: List[str] = []
88
+ created_at: int # unix timestamp seconds
89
+
90
+
91
+ class MessageFileStreamResponse(StreamResponse):
92
+ id: str # file id
93
+ conversation_id: str
94
+ type: str # only image
95
+ belongs_to: str # assistant
96
+ url: str
97
+
98
+
99
+ class WorkflowsStreamResponse(StreamResponse):
100
+ workflow_run_id: str
101
+ data: Optional[Union[
102
+ WorkflowStartedData,
103
+ WorkflowFinishedData,
104
+ NodeStartedData,
105
+ NodeFinishedData]
106
+ ]
107
+
108
+
109
+ class ChatWorkflowsStreamResponse(WorkflowsStreamResponse):
110
+ message_id: str
111
+ conversation_id: str
112
+ created_at: int
113
+
114
+
115
+ _COMPLETION_EVENT_TO_STREAM_RESP_MAPPING = {
116
+ StreamEvent.PING: PingResponse,
117
+ StreamEvent.MESSAGE: MessageStreamResponse,
118
+ StreamEvent.MESSAGE_END: MessageEndStreamResponse,
119
+ StreamEvent.MESSAGE_REPLACE: MessageReplaceStreamResponse,
120
+ }
121
+
122
+ CompletionStreamResponse = Union[
123
+ PingResponse,
124
+ MessageStreamResponse,
125
+ MessageEndStreamResponse,
126
+ MessageReplaceStreamResponse,
127
+ ]
128
+
129
+
130
+ def build_completion_stream_response(data: dict) -> CompletionStreamResponse:
131
+ event = StreamEvent.new(data.get(STREAM_EVENT_KEY))
132
+ return _COMPLETION_EVENT_TO_STREAM_RESP_MAPPING.get(event, StreamResponse)(**data)
133
+
134
+
135
+ _CHAT_EVENT_TO_STREAM_RESP_MAPPING = {
136
+ StreamEvent.PING: PingResponse,
137
+ # chat
138
+ StreamEvent.MESSAGE: MessageStreamResponse,
139
+ StreamEvent.MESSAGE_END: MessageEndStreamResponse,
140
+ StreamEvent.MESSAGE_REPLACE: MessageReplaceStreamResponse,
141
+ StreamEvent.MESSAGE_FILE: MessageFileStreamResponse,
142
+ # agent
143
+ StreamEvent.AGENT_MESSAGE: AgentMessageStreamResponse,
144
+ StreamEvent.AGENT_THOUGHT: AgentThoughtStreamResponse,
145
+ # workflow
146
+ StreamEvent.WORKFLOW_STARTED: WorkflowsStreamResponse,
147
+ StreamEvent.NODE_STARTED: WorkflowsStreamResponse,
148
+ StreamEvent.NODE_FINISHED: WorkflowsStreamResponse,
149
+ StreamEvent.WORKFLOW_FINISHED: WorkflowsStreamResponse,
150
+ }
151
+
152
+ ChatStreamResponse = Union[
153
+ PingResponse,
154
+ MessageStreamResponse,
155
+ MessageEndStreamResponse,
156
+ MessageReplaceStreamResponse,
157
+ MessageFileStreamResponse,
158
+ AgentMessageStreamResponse,
159
+ AgentThoughtStreamResponse,
160
+ WorkflowsStreamResponse,
161
+ ]
162
+
163
+
164
+ def build_chat_stream_response(data: dict) -> ChatStreamResponse:
165
+ event = StreamEvent.new(data.get(STREAM_EVENT_KEY))
166
+ return _CHAT_EVENT_TO_STREAM_RESP_MAPPING.get(event, StreamResponse)(**data)
167
+
168
+
169
+ _WORKFLOW_EVENT_TO_STREAM_RESP_MAPPING = {
170
+ StreamEvent.PING: PingResponse,
171
+ # workflow
172
+ StreamEvent.WORKFLOW_STARTED: WorkflowsStreamResponse,
173
+ StreamEvent.NODE_STARTED: WorkflowsStreamResponse,
174
+ StreamEvent.NODE_FINISHED: WorkflowsStreamResponse,
175
+ StreamEvent.WORKFLOW_FINISHED: WorkflowsStreamResponse,
176
+ }
177
+
178
+ WorkflowsRunStreamResponse = Union[
179
+ PingResponse,
180
+ WorkflowsStreamResponse,
181
+ ]
182
+
183
+
184
+ def build_workflows_stream_response(data: dict) -> WorkflowsRunStreamResponse:
185
+ event = StreamEvent.new(data.get(STREAM_EVENT_KEY))
186
+ return _WORKFLOW_EVENT_TO_STREAM_RESP_MAPPING.get(event, StreamResponse)(**data)
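Because each builder falls back to the generic `StreamResponse` for unknown events, consumers can safely branch on the concrete type. A sketch with an illustrative, already-parsed SSE payload:

from dify_client import models

raw = {"event": "message", "task_id": "t1", "message_id": "m1",
       "answer": "Hello", "created_at": 1700000000}

event = models.build_chat_stream_response(raw)
if isinstance(event, models.MessageStreamResponse):
    print(event.answer)  # one incremental chunk of the answer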
dify_client_python/build/lib/dify_client/models/workflow.py ADDED
@@ -0,0 +1,91 @@
1
+ try:
2
+ from enum import StrEnum
3
+ except ImportError:
4
+ from strenum import StrEnum
5
+ from typing import Dict, List, Optional
6
+
7
+ from pydantic import BaseModel
8
+
9
+ from dify_client.models.base import ResponseMode, File
10
+
11
+
12
+ class WorkflowStatus(StrEnum):
13
+ RUNNING = "running"
14
+ SUCCEEDED = "succeeded"
15
+ FAILED = "failed"
16
+ STOPPED = "stopped"
17
+
18
+
19
+ class ExecutionMetadata(BaseModel):
20
+ total_tokens: Optional[int]
21
+ total_price: Optional[str]
22
+ currency: Optional[str]
23
+
24
+
25
+ class WorkflowStartedData(BaseModel):
26
+ id: str # workflow run id
27
+ workflow_id: str # workflow id
28
+ sequence_number: int
29
+ inputs: Optional[dict] = None
30
+ created_at: int # unix timestamp seconds
31
+
32
+
33
+ class NodeStartedData(BaseModel):
34
+ id: str # workflow run id
35
+ node_id: str
36
+ node_type: str
37
+ title: str
38
+ index: int
39
+ predecessor_node_id: Optional[str] = None
40
+ inputs: Optional[dict] = None
41
+ created_at: int
42
+ extras: dict = {}
43
+
44
+
45
+ class NodeFinishedData(BaseModel):
46
+ id: str # workflow run id
47
+ node_id: str
48
+ node_type: str
49
+ title: str
50
+ index: int
51
+ predecessor_node_id: Optional[str] = None
52
+ inputs: Optional[dict] = None
53
+ process_data: Optional[dict] = None
54
+ outputs: Optional[dict] = {}
55
+ status: WorkflowStatus
56
+ error: Optional[str] = None
57
+ elapsed_time: Optional[float] # seconds
58
+ execution_metadata: Optional[ExecutionMetadata] = None
59
+ created_at: int
60
+ finished_at: int
61
+ files: List = []
62
+
63
+
64
+ class WorkflowFinishedData(BaseModel):
65
+ id: str # workflow run id
66
+ workflow_id: str # workflow id
67
+ sequence_number: int
68
+ status: WorkflowStatus
69
+ outputs: Optional[dict]
70
+ error: Optional[str]
71
+ elapsed_time: Optional[float]
72
+ total_tokens: Optional[int]
73
+ total_steps: Optional[int] = 0
74
+ created_at: int
75
+ finished_at: int
76
+ created_by: dict = {}
77
+ files: List = []
78
+
79
+
80
+ class WorkflowsRunRequest(BaseModel):
81
+ inputs: Dict = {}
82
+ response_mode: ResponseMode
83
+ user: str
84
+ conversation_id: Optional[str] = ""
85
+ files: List[File] = []
86
+
87
+
88
+ class WorkflowsRunResponse(BaseModel):
89
+ log_id: str
90
+ task_id: str
91
+ data: WorkflowFinishedData
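A blocking workflow run deserializes straight into `WorkflowsRunResponse`, whose `data` field is a `WorkflowFinishedData`. A hedged sketch (the input keys depend entirely on the target workflow):

from dify_client import Client, models

client = Client(api_key="your-api-key")

result = client.run_workflows(models.WorkflowsRunRequest(
    inputs={"topic": "quarterly report"},  # keys defined by your workflow
    response_mode=models.ResponseMode.BLOCKING,
    user="user-123",
))
print(result.data.status, result.data.outputs)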
dify_client_python/build/lib/dify_client/utils/__init__.py ADDED
@@ -0,0 +1 @@
1
+ from ._common import *
dify_client_python/build/lib/dify_client/utils/_common.py ADDED
1
+ def str_to_enum(str_enum_class, str_value: str, ignore_not_found: bool = False, enum_default=None):
2
+ for key, member in str_enum_class.__members__.items():
3
+ if str_value == member.value:
4
+ return member
5
+ if ignore_not_found:
6
+ return enum_default
7
+ raise ValueError(f"Invalid enum value: {str_value}")
dify_client_python/dify_client/.DS_Store ADDED
Binary file (6.15 kB).
 
dify_client_python/dify_client/__init__.py ADDED
@@ -0,0 +1 @@
1
+ from ._clientx import Client, AsyncClient
dify_client_python/dify_client/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (231 Bytes).
 
dify_client_python/dify_client/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (228 Bytes).
 
dify_client_python/dify_client/__pycache__/_clientx.cpython-310.pyc ADDED
Binary file (21.6 kB).
 
dify_client_python/dify_client/__pycache__/_clientx.cpython-312.pyc ADDED
Binary file (37.3 kB).
 
dify_client_python/dify_client/__pycache__/errors.cpython-310.pyc ADDED
Binary file (4.33 kB).
 
dify_client_python/dify_client/__pycache__/errors.cpython-312.pyc ADDED
Binary file (6.12 kB).
 
dify_client_python/dify_client/_clientx.py ADDED
@@ -0,0 +1,694 @@
1
+ from typing import Optional, Any, Mapping, Iterator, AsyncIterator, Union, Dict, TypedDict
2
+
3
+ try:
4
+ from enum import StrEnum
5
+ except ImportError:
6
+ from strenum import StrEnum
7
+ try:
8
+ from http import HTTPMethod
9
+ except ImportError:
10
+ class HTTPMethod(StrEnum):
11
+ GET = "GET"
12
+ POST = "POST"
13
+ PUT = "PUT"
14
+ DELETE = "DELETE"
15
+
16
+ import json as jsonlib  # stdlib json, aliased: the request methods take a `json` keyword that shadows it
+ import httpx
+ from httpx_sse import aconnect_sse  # required by AsyncClient.arequest_stream below
17
+ # noinspection PyProtectedMember
18
+ import httpx._types as types
19
+ from pydantic import BaseModel
20
+
21
+ from dify_client_python.dify_client import errors, models
22
+
23
+ _httpx_client = httpx.Client()
24
+ _async_httpx_client = httpx.AsyncClient()
25
+
26
+ IGNORED_STREAM_EVENTS = (models.StreamEvent.PING.value,)
27
+
28
+ # feedback
29
+ ENDPOINT_FEEDBACKS = "/messages/{message_id}/feedbacks"
30
+ # suggest
31
+ ENDPOINT_SUGGESTED = "/messages/{message_id}/suggested"
32
+ # files upload
33
+ ENDPOINT_FILES_UPLOAD = "/files/upload"
34
+ # completion
35
+ ENDPOINT_COMPLETION_MESSAGES = "/completion-messages"
36
+ ENDPOINT_STOP_COMPLETION_MESSAGES = "/completion-messages/{task_id}/stop"
37
+ # chat
38
+ ENDPOINT_CHAT_MESSAGES = "/chat-messages"
39
+ ENDPOINT_STOP_CHAT_MESSAGES = "/chat-messages/{task_id}/stop"
40
+ # workflow
41
+ ENDPOINT_RUN_WORKFLOWS = "/workflows/run"
42
+ ENDPOINT_STOP_WORKFLOWS = "/workflows/{task_id}/stop"
43
+ # audio <-> text
44
+ ENDPOINT_TEXT_TO_AUDIO = "/text-to-audio"
45
+ ENDPOINT_AUDIO_TO_TEXT = "/audio-to-text"
46
+
47
+
48
+ class ServerSentEvent(TypedDict):
49
+ event: Optional[str]
50
+ data: str
51
+ id: Optional[str]
52
+ retry: Optional[int]
53
+
54
+
55
+ class Client(BaseModel):
56
+ api_key: str
57
+ api_base: Optional[str] = "https://api.dify.ai/v1"
58
+
59
+ def request(self, endpoint: str, method: str,
60
+ content: Optional[types.RequestContent] = None,
61
+ data: Optional[types.RequestData] = None,
62
+ files: Optional[types.RequestFiles] = None,
63
+ json: Optional[Any] = None,
64
+ params: Optional[types.QueryParamTypes] = None,
65
+ headers: Optional[Mapping[str, str]] = None,
66
+ **kwargs: object,
67
+ ) -> httpx.Response:
68
+ """
69
+ Sends a synchronous HTTP request to the specified endpoint.
70
+
71
+ Args:
72
+ endpoint: The API endpoint to send the request to.
73
+ method: The HTTP method to use (e.g., 'GET', 'POST').
74
+ content: Raw content to include in the request body.
75
+ data: Form data to include in the request body.
76
+ files: Files to include in the request body.
77
+ json: JSON data to include in the request body.
78
+ params: Query parameters to include in the request URL.
79
+ headers: Additional headers to include in the request.
80
+ **kwargs: Extra keyword arguments to pass to the request function.
81
+
82
+ Returns:
83
+ An `httpx.Response` object containing the HTTP response.
84
+
85
+ Raises:
86
+ Various DifyAPIError exceptions if the response contains an error.
87
+ """
88
+ merged_headers = {}
89
+ if headers:
90
+ merged_headers.update(headers)
91
+ self._prepare_auth_headers(merged_headers)
92
+
93
+ response = _httpx_client.request(method, endpoint, content=content, data=data, files=files, json=json,
94
+ params=params, headers=merged_headers, **kwargs)
95
+ errors.raise_for_status(response)
96
+ return response
97
+
98
+ def request_stream(self, endpoint: str, method: str,
99
+ content: Optional[types.RequestContent] = None,
100
+ data: Optional[types.RequestData] = None,
101
+ files: Optional[types.RequestFiles] = None,
102
+ json: Optional[Any] = None,
103
+ params: Optional[types.QueryParamTypes] = None,
104
+ headers: Optional[Mapping[str, str]] = None,
105
+ **kwargs,
106
+ ) -> Iterator[ServerSentEvent]:
107
+ """
108
+ Opens a server-sent events (SSE) stream to the specified endpoint.
109
+
110
+ Args:
111
+ endpoint: The API endpoint to send the request to.
112
+ method: The HTTP method to use (e.g., 'GET', 'POST').
113
+ content: Raw content to include in the request body.
114
+ data: Form data to include in the request body.
115
+ files: Files to include in the request body.
116
+ json: JSON data to include in the request body.
117
+ params: Query parameters to include in the request URL.
118
+ headers: Additional headers to include in the request.
119
+ **kwargs: Extra keyword arguments to pass to the request function.
120
+
121
+ Returns:
122
+ An iterator of `ServerSentEvent` objects representing the stream of events.
123
+
124
+ Raises:
125
+ Various DifyAPIError exceptions if an error event is received in the stream.
126
+ """
127
+ merged_headers = {
128
+ 'Accept': 'text/event-stream',
129
+ 'Cache-Control': 'no-cache',
130
+ 'Connection': 'keep-alive'
131
+ }
132
+ if headers:
133
+ merged_headers.update(headers)
134
+ self._prepare_auth_headers(merged_headers)
135
+
136
+ response = _httpx_client.stream(
137
+ method,
138
+ endpoint,
139
+ headers=merged_headers,
140
+ content=content,
141
+ data=data,
142
+ files=files,
143
+ json=json,
144
+ params=params,
145
+ **kwargs
146
+ )
147
+
148
+ with response as event_source:
149
+ if not _check_stream_content_type(event_source):
150
+ event_source.read()
151
+ errors.raise_for_status(event_source)
152
+ for line in event_source.iter_lines():
153
+ if not line:
154
+ continue
155
+ if line.startswith('data: '):  # httpx iter_lines() yields decoded str, not bytes
156
+ data = line[6:]
157
+ try:
158
+ json_data = jsonlib.loads(data)  # the `json` parameter shadows the stdlib module, hence jsonlib
159
+ event = {
160
+ 'event': json_data.get('event'),
161
+ 'data': data,
162
+ 'id': None,
163
+ 'retry': None
164
+ }
165
+ if event['event'] in IGNORED_STREAM_EVENTS:
166
+ continue
167
+ yield event
168
+ except jsonlib.JSONDecodeError:
169
+ continue
170
+
171
+ def feedback_messages(self, message_id: str, req: models.FeedbackRequest, **kwargs) -> models.FeedbackResponse:
172
+ """
173
+ Submits feedback for a specific message.
174
+
175
+ Args:
176
+ message_id: The identifier of the message to submit feedback for.
177
+ req: A `FeedbackRequest` object containing the feedback details, such as the rating.
178
+ **kwargs: Extra keyword arguments to pass to the request function.
179
+
180
+ Returns:
181
+ A `FeedbackResponse` object containing the result of the feedback submission.
182
+ """
183
+ response = self.request(
184
+ self._prepare_url(ENDPOINT_FEEDBACKS, message_id=message_id),
185
+ HTTPMethod.POST,
186
+ json=req.model_dump(),
187
+ **kwargs,
188
+ )
189
+ return models.FeedbackResponse(**response.json())
190
+
191
+ def suggest_messages(self, message_id: str, req: models.ChatSuggestRequest, **kwargs) -> models.ChatSuggestResponse:
192
+ """
193
+ Retrieves suggested messages based on a specific message.
194
+
195
+ Args:
196
+ message_id: The identifier of the message to get suggestions for.
197
+ req: A `ChatSuggestRequest` object containing the request details.
198
+ **kwargs: Extra keyword arguments to pass to the request function.
199
+
200
+ Returns:
201
+ A `ChatSuggestResponse` object containing suggested messages.
202
+ """
203
+ response = self.request(
204
+ self._prepare_url(ENDPOINT_SUGGESTED, message_id=message_id),
205
+ HTTPMethod.GET,
206
+ params=req.model_dump(),
207
+ **kwargs,
208
+ )
209
+ return models.ChatSuggestResponse(**response.json())
210
+
211
+ def upload_files(self, file: types.FileTypes, req: models.UploadFileRequest,
212
+ **kwargs) -> models.UploadFileResponse:
213
+ """
214
+ Uploads a file to be used in subsequent requests.
215
+
216
+ Args:
217
+ file: The file to upload. This can be a file-like object, or a tuple of
218
+ (`filename`, file-like object, mime_type).
219
+ req: An `UploadFileRequest` object containing the upload details, such as the user who is uploading.
220
+ **kwargs: Extra keyword arguments to pass to the request function.
221
+
222
+ Returns:
223
+ An `UploadFileResponse` object containing details about the uploaded file, such as its identifier, name, and MIME type.
224
+ """
225
+ response = self.request(
226
+ self._prepare_url(ENDPOINT_FILES_UPLOAD),
227
+ HTTPMethod.POST,
228
+ data=req.model_dump(),
229
+ files=[("file", file)],
230
+ **kwargs,
231
+ )
232
+ return models.UploadFileResponse(**response.json())
233
+
234
+ def completion_messages(self, req: models.CompletionRequest, **kwargs) \
235
+ -> Union[models.CompletionResponse, Iterator[models.CompletionStreamResponse]]:
236
+ """
237
+ Sends a request to generate a completion or a series of completions based on the provided input.
238
+
239
+ Returns:
240
+ If the response mode is blocking, it returns a `CompletionResponse` object containing the generated message.
241
+ If the response mode is streaming, it returns an iterator of `CompletionStreamResponse` objects containing
242
+ the stream of generated events.
243
+ """
244
+ if req.response_mode == models.ResponseMode.BLOCKING:
245
+ return self._completion_messages(req, **kwargs)
246
+ if req.response_mode == models.ResponseMode.STREAMING:
247
+ return self._completion_messages_stream(req, **kwargs)
248
+ raise ValueError(f"Invalid request_mode: {req.response_mode}")
249
+
250
+ def _completion_messages(self, req: models.CompletionRequest, **kwargs) -> models.CompletionResponse:
251
+ response = self.request(
252
+ self._prepare_url(ENDPOINT_COMPLETION_MESSAGES),
253
+ HTTPMethod.POST,
254
+ json=req.model_dump(),
255
+ **kwargs,
256
+ )
257
+ return models.CompletionResponse(**response.json())
258
+
259
+ def _completion_messages_stream(self, req: models.CompletionRequest, **kwargs) \
260
+ -> Iterator[models.CompletionStreamResponse]:
261
+ event_source = self.request_stream(
262
+ self._prepare_url(ENDPOINT_COMPLETION_MESSAGES),
263
+ HTTPMethod.POST,
264
+ json=req.model_dump(),
265
+ **kwargs,
266
+ )
267
+ for sse in event_source:
268
+ yield models.build_completion_stream_response(jsonlib.loads(sse["data"]))  # sse is a plain dict; its "data" key holds the raw JSON payload
269
+
270
+ def stop_completion_messages(self, task_id: str, req: models.StopRequest, **kwargs) -> models.StopResponse:
271
+ """
272
+ Sends a request to stop a streaming completion task.
273
+
274
+ Returns:
275
+ A `StopResponse` object indicating the success of the operation.
276
+ """
277
+ return self._stop_stream(self._prepare_url(ENDPOINT_STOP_COMPLETION_MESSAGES, task_id=task_id), req, **kwargs)
278
+
279
+ def chat_messages(self, req: models.ChatRequest, **kwargs) \
280
+ -> Union[models.ChatResponse, Iterator[models.ChatStreamResponse]]:
281
+ """
282
+ Sends a request to generate a chat message or a series of chat messages based on the provided input.
283
+
284
+ Returns:
285
+ If the response mode is blocking, it returns a `ChatResponse` object containing the generated chat message.
286
+ If the response mode is streaming, it returns an iterator of `ChatStreamResponse` objects containing the
287
+ stream of chat events.
288
+ """
289
+ if req.response_mode == models.ResponseMode.BLOCKING:
290
+ return self._chat_messages(req, **kwargs)
291
+ if req.response_mode == models.ResponseMode.STREAMING:
292
+ return self._chat_messages_stream(req, **kwargs)
293
+ raise ValueError(f"Invalid request_mode: {req.response_mode}")
294
+
295
+ def _chat_messages(self, req: models.ChatRequest, **kwargs) -> models.ChatResponse:
296
+ response = self.request(
297
+ self._prepare_url(ENDPOINT_CHAT_MESSAGES),
298
+ HTTPMethod.POST,
299
+ json=req.model_dump(),
300
+ **kwargs,
301
+ )
302
+ return models.ChatResponse(**response.json())
303
+
304
+ def _chat_messages_stream(self, req: models.ChatRequest, **kwargs) -> Iterator[models.ChatStreamResponse]:
305
+ event_source = self.request_stream(
306
+ self._prepare_url(ENDPOINT_CHAT_MESSAGES),
307
+ HTTPMethod.POST,
308
+ json=req.model_dump(),
309
+ **kwargs,
310
+ )
311
+ for sse in event_source:
312
+ yield models.build_chat_stream_response(jsonlib.loads(sse["data"]))
313
+
314
+ def stop_chat_messages(self, task_id: str, req: models.StopRequest, **kwargs) -> models.StopResponse:
315
+ """
316
+ Sends a request to stop a streaming chat task.
317
+
318
+ Returns:
319
+ A `StopResponse` object indicating the success of the operation.
320
+ """
321
+ return self._stop_stream(self._prepare_url(ENDPOINT_STOP_CHAT_MESSAGES, task_id=task_id), req, **kwargs)
322
+
323
+ def run_workflows(self, req: models.WorkflowsRunRequest, **kwargs) \
324
+ -> Union[models.WorkflowsRunResponse, Iterator[models.WorkflowsRunStreamResponse]]:
325
+ """
326
+ Initiates the execution of a workflow, which can consist of multiple steps and actions.
327
+
328
+ Returns:
329
+ If the response mode is blocking, it returns a `WorkflowsRunResponse` object containing the results of the
330
+ completed workflow.
331
+ If the response mode is streaming, it returns an iterator of `WorkflowsRunStreamResponse` objects
332
+ containing the stream of workflow events.
333
+ """
334
+ if req.response_mode == models.ResponseMode.BLOCKING:
335
+ return self._run_workflows(req, **kwargs)
336
+ if req.response_mode == models.ResponseMode.STREAMING:
337
+ return self._run_workflows_stream(req, **kwargs)
338
+ raise ValueError(f"Invalid request_mode: {req.response_mode}")
339
+
340
+ def _run_workflows(self, req: models.WorkflowsRunRequest, **kwargs) -> models.WorkflowsRunResponse:
341
+ response = self.request(
342
+ self._prepare_url(ENDPOINT_RUN_WORKFLOWS),
343
+ HTTPMethod.POST,
344
+ json=req.model_dump(),
345
+ **kwargs,
346
+ )
347
+ return models.WorkflowsRunResponse(**response.json())
348
+
349
+ def _run_workflows_stream(self, req: models.WorkflowsRunRequest, **kwargs) \
350
+ -> Iterator[models.WorkflowsRunStreamResponse]:
351
+ event_source = self.request_stream(
352
+ self._prepare_url(ENDPOINT_RUN_WORKFLOWS),
353
+ HTTPMethod.POST,
354
+ json=req.model_dump(),
355
+ **kwargs,
356
+ )
357
+ for sse in event_source:
358
+ yield models.build_workflows_stream_response(jsonlib.loads(sse["data"]))
359
+
360
+ def stop_workflows(self, task_id: str, req: models.StopRequest, **kwargs) -> models.StopResponse:
361
+ """
362
+ Sends a request to stop a streaming workflow task.
363
+
364
+ Returns:
365
+ A `StopResponse` object indicating the success of the operation.
366
+ """
367
+ return self._stop_stream(self._prepare_url(ENDPOINT_STOP_WORKFLOWS, task_id=task_id), req, **kwargs)
368
+
369
+ def _stop_stream(self, endpoint: str, req: models.StopRequest, **kwargs) -> models.StopResponse:
370
+ response = self.request(
371
+ endpoint,
372
+ HTTPMethod.POST,
373
+ json=req.model_dump(),
374
+ **kwargs,
375
+ )
376
+ return models.StopResponse(**response.json())
377
+
378
+ def _prepare_url(self, endpoint: str, **kwargs) -> str:
379
+ return self.api_base + endpoint.format(**kwargs)
380
+
381
+ def _prepare_auth_headers(self, headers: Dict[str, str]):
382
+ if "authorization" not in (key.lower() for key in headers.keys()):
383
+ headers["Authorization"] = f"Bearer {self.api_key}"
384
+
385
+
386
+ class AsyncClient(BaseModel):
387
+ api_key: str
388
+ api_base: Optional[str] = "https://api.dify.ai/v1"
389
+
390
+ async def arequest(self, endpoint: str, method: str,
391
+ content: Optional[types.RequestContent] = None,
392
+ data: Optional[types.RequestData] = None,
393
+ files: Optional[types.RequestFiles] = None,
394
+ json: Optional[Any] = None,
395
+ params: Optional[types.QueryParamTypes] = None,
396
+ headers: Optional[Mapping[str, str]] = None,
397
+ **kwargs,
398
+ ) -> httpx.Response:
399
+ """
400
+ Asynchronously sends a request to the specified Dify API endpoint.
401
+
402
+ Args:
403
+ endpoint: The endpoint URL to which the request is sent.
404
+ method: The HTTP method to be used for the request (e.g., 'GET', 'POST').
405
+ content: Raw content to include in the request body, if any.
406
+ data: Form data to be sent in the request body.
407
+ files: Files to be uploaded with the request.
408
+ json: JSON data to be sent in the request body.
409
+ params: Query parameters to be included in the request URL.
410
+ headers: Additional headers to be sent with the request.
411
+ **kwargs: Extra keyword arguments to be passed to the underlying HTTPX request function.
412
+
413
+ Returns:
414
+ An httpx.Response object containing the server's response to the HTTP request.
415
+
416
+ Raises:
417
+ Various DifyAPIError exceptions if the response contains an error.
418
+ """
419
+ merged_headers = {}
420
+ if headers:
421
+ merged_headers.update(headers)
422
+ self._prepare_auth_headers(merged_headers)
423
+
424
+ response = await _async_httpx_client.request(method, endpoint, content=content, data=data, files=files,
425
+ json=json, params=params, headers=merged_headers, **kwargs)
426
+ errors.raise_for_status(response)
427
+ return response
428
+
429
+ async def arequest_stream(self, endpoint: str, method: str,
430
+ content: Optional[types.RequestContent] = None,
431
+ data: Optional[types.RequestData] = None,
432
+ files: Optional[types.RequestFiles] = None,
433
+ json: Optional[Any] = None,
434
+ params: Optional[types.QueryParamTypes] = None,
435
+ headers: Optional[Mapping[str, str]] = None,
436
+ **kwargs,
437
+ ) -> AsyncIterator[ServerSentEvent]:
438
+ """
439
+ Asynchronously establishes a streaming connection to the specified Dify API endpoint.
440
+
441
+ Args:
442
+ endpoint: The endpoint URL to which the request is sent.
443
+ method: The HTTP method to be used for the request (e.g., 'GET', 'POST').
444
+ content: Raw content to include in the request body, if any.
445
+ data: Form data to be sent in the request body.
446
+ files: Files to be uploaded with the request.
447
+ json: JSON data to be sent in the request body.
448
+ params: Query parameters to be included in the request URL.
449
+ headers: Additional headers to be sent with the request.
450
+ **kwargs: Extra keyword arguments to be passed to the underlying HTTPX request function.
451
+
452
+ Yields:
453
+ ServerSentEvent objects representing the events received from the server.
454
+
455
+ Raises:
456
+ Various DifyAPIError exceptions if an error event is received in the stream.
457
+ """
458
+ merged_headers = {}
459
+ if headers:
460
+ merged_headers.update(headers)
461
+ self._prepare_auth_headers(merged_headers)
462
+
463
+ async with aconnect_sse(_async_httpx_client, method, endpoint, headers=merged_headers,
464
+ content=content, data=data, files=files, json=json, params=params,
465
+ **kwargs) as event_source:
466
+ if not _check_stream_content_type(event_source.response):
467
+ await event_source.response.aread()
468
+ errors.raise_for_status(event_source.response)
469
+ async for sse in event_source.aiter_sse():
470
+ errors.raise_for_status(sse)
471
+ if sse.event in IGNORED_STREAM_EVENTS or sse.data in IGNORED_STREAM_EVENTS:
472
+ continue
473
+ yield sse
474
+
475
+ async def afeedback_messages(self, message_id: str, req: models.FeedbackRequest, **kwargs) \
476
+ -> models.FeedbackResponse:
477
+ """
478
+ Submits feedback for a specific message.
479
+
480
+ Args:
481
+ message_id: The identifier of the message to submit feedback for.
482
+ req: A `FeedbackRequest` object containing the feedback details, such as the rating.
483
+ **kwargs: Extra keyword arguments to pass to the request function.
484
+
485
+ Returns:
486
+ A `FeedbackResponse` object containing the result of the feedback submission.
487
+ """
488
+ response = await self.arequest(
489
+ self._prepare_url(ENDPOINT_FEEDBACKS, message_id=message_id),
490
+ HTTPMethod.POST,
491
+ json=req.model_dump(),
492
+ **kwargs,
493
+ )
494
+ return models.FeedbackResponse(**response.json())
495
+
496
+ async def asuggest_messages(self, message_id: str, req: models.ChatSuggestRequest, **kwargs) \
497
+ -> models.ChatSuggestResponse:
498
+ """
499
+ Retrieves suggested messages based on a specific message.
500
+
501
+ Args:
502
+ message_id: The identifier of the message to get suggestions for.
503
+ req: A `ChatSuggestRequest` object containing the request details.
504
+ **kwargs: Extra keyword arguments to pass to the request function.
505
+
506
+ Returns:
507
+ A `ChatSuggestResponse` object containing suggested messages.
508
+ """
509
+ response = await self.arequest(
510
+ self._prepare_url(ENDPOINT_SUGGESTED, message_id=message_id),
511
+ HTTPMethod.GET,
512
+ params=req.model_dump(),
513
+ **kwargs,
514
+ )
515
+ return models.ChatSuggestResponse(**response.json())
516
+
517
+ async def aupload_files(self, file: types.FileTypes, req: models.UploadFileRequest, **kwargs) \
518
+ -> models.UploadFileResponse:
519
+ """
520
+ Uploads a file to be used in subsequent requests.
521
+
522
+ Args:
523
+ file: The file to upload. This can be a file-like object, or a tuple of
524
+ (`filename`, file-like object, mime_type).
525
+ req: An `UploadFileRequest` object containing the upload details, such as the user who is uploading.
526
+ **kwargs: Extra keyword arguments to pass to the request function.
527
+
528
+ Returns:
529
+ An `UploadFileResponse` object containing details about the uploaded file, such as its identifier, name, and MIME type.
530
+ """
531
+ response = await self.arequest(
532
+ self._prepare_url(ENDPOINT_FILES_UPLOAD),
533
+ HTTPMethod.POST,
534
+ data=req.model_dump(),
535
+ files=[("file", file)],
536
+ **kwargs,
537
+ )
538
+ return models.UploadFileResponse(**response.json())
539
+
540
+ async def acompletion_messages(self, req: models.CompletionRequest, **kwargs) \
541
+ -> Union[models.CompletionResponse, AsyncIterator[models.CompletionStreamResponse]]:
542
+ """
543
+ Sends a request to generate a completion or a series of completions based on the provided input.
544
+
545
+ Returns:
546
+ If the response mode is blocking, it returns a `CompletionResponse` object containing the generated message.
547
+ If the response mode is streaming, it returns an iterator of `CompletionStreamResponse` objects containing
548
+ the stream of generated events.
549
+ """
550
+ if req.response_mode == models.ResponseMode.BLOCKING:
551
+ return await self._acompletion_messages(req, **kwargs)
552
+ if req.response_mode == models.ResponseMode.STREAMING:
553
+ return self._acompletion_messages_stream(req, **kwargs)
554
+ raise ValueError(f"Invalid request_mode: {req.response_mode}")
555
+
556
+ async def _acompletion_messages(self, req: models.CompletionRequest, **kwargs) -> models.CompletionResponse:
557
+ response = await self.arequest(
558
+ self._prepare_url(ENDPOINT_COMPLETION_MESSAGES),
559
+ HTTPMethod.POST,
560
+ json=req.model_dump(),
561
+ **kwargs,
562
+ )
563
+ return models.CompletionResponse(**response.json())
564
+
565
+ async def _acompletion_messages_stream(self, req: models.CompletionRequest, **kwargs) \
566
+ -> AsyncIterator[models.CompletionStreamResponse]:
567
+ async for sse in self.arequest_stream(
568
+ self._prepare_url(ENDPOINT_COMPLETION_MESSAGES),
569
+ HTTPMethod.POST,
570
+ json=req.model_dump(),
571
+ **kwargs):
572
+ yield models.build_completion_stream_response(sse.json())
573
+
574
+ async def astop_completion_messages(self, task_id: str, req: models.StopRequest, **kwargs) -> models.StopResponse:
575
+ """
576
+ Sends a request to stop a streaming completion task.
577
+
578
+ Returns:
579
+ A `StopResponse` object indicating the success of the operation.
580
+ """
581
+ return await self._astop_stream(
582
+ self._prepare_url(ENDPOINT_STOP_COMPLETION_MESSAGES, task_id=task_id), req, **kwargs)
583
+
584
+ async def achat_messages(self, req: models.ChatRequest, **kwargs) \
585
+ -> Union[models.ChatResponse, AsyncIterator[models.ChatStreamResponse]]:
586
+ """
587
+ Sends a request to generate a chat message or a series of chat messages based on the provided input.
588
+
589
+ Returns:
590
+ If the response mode is blocking, it returns a `ChatResponse` object containing the generated chat message.
591
+ If the response mode is streaming, it returns an iterator of `ChatStreamResponse` objects containing the
592
+ stream of chat events.
593
+ """
594
+ if req.response_mode == models.ResponseMode.BLOCKING:
595
+ return await self._achat_messages(req, **kwargs)
596
+ if req.response_mode == models.ResponseMode.STREAMING:
597
+ return self._achat_messages_stream(req, **kwargs)
598
+ raise ValueError(f"Invalid request_mode: {req.response_mode}")
599
+
600
+ async def _achat_messages(self, req: models.ChatRequest, **kwargs) -> models.ChatResponse:
601
+ response = await self.arequest(
602
+ self._prepare_url(ENDPOINT_CHAT_MESSAGES),
603
+ HTTPMethod.POST,
604
+ json=req.model_dump(),
605
+ **kwargs,
606
+ )
607
+ return models.ChatResponse(**response.json())
608
+
609
+ async def _achat_messages_stream(self, req: models.ChatRequest, **kwargs) \
610
+ -> AsyncIterator[models.ChatStreamResponse]:
611
+ async for sse in self.arequest_stream(
612
+ self._prepare_url(ENDPOINT_CHAT_MESSAGES),
613
+ HTTPMethod.POST,
614
+ json=req.model_dump(),
615
+ **kwargs):
616
+ yield models.build_chat_stream_response(sse.json())
617
+
618
+ async def astop_chat_messages(self, task_id: str, req: models.StopRequest, **kwargs) -> models.StopResponse:
619
+ """
620
+ Sends a request to stop a streaming chat task.
621
+
622
+ Returns:
623
+ A `StopResponse` object indicating the success of the operation.
624
+ """
625
+ return await self._astop_stream(self._prepare_url(ENDPOINT_STOP_CHAT_MESSAGES, task_id=task_id), req, **kwargs)
626
+
627
+ async def arun_workflows(self, req: models.WorkflowsRunRequest, **kwargs) \
628
+ -> Union[models.WorkflowsRunResponse, AsyncIterator[models.WorkflowsRunStreamResponse]]:
629
+ """
630
+ Initiates the execution of a workflow, which can consist of multiple steps and actions.
631
+
632
+ Returns:
633
+ If the response mode is blocking, it returns a `WorkflowsRunResponse` object containing the results of the
634
+ completed workflow.
635
+ If the response mode is streaming, it returns an iterator of `WorkflowsRunStreamResponse` objects
636
+ containing the stream of workflow events.
637
+ """
638
+ if req.response_mode == models.ResponseMode.BLOCKING:
639
+ return await self._arun_workflows(req, **kwargs)
640
+ if req.response_mode == models.ResponseMode.STREAMING:
641
+ return self._arun_workflows_stream(req, **kwargs)
642
+ raise ValueError(f"Invalid request_mode: {req.response_mode}")
643
+
644
+ async def _arun_workflows(self, req: models.WorkflowsRunRequest, **kwargs) -> models.WorkflowsRunResponse:
645
+ response = await self.arequest(
646
+ self._prepare_url(ENDPOINT_RUN_WORKFLOWS),
647
+ HTTPMethod.POST,
648
+ json=req.model_dump(),
649
+ **kwargs,
650
+ )
651
+ return models.WorkflowsRunResponse(**response.json())
652
+
653
+ async def _arun_workflows_stream(self, req: models.WorkflowsRunRequest, **kwargs) \
654
+ -> AsyncIterator[models.WorkflowsRunStreamResponse]:
655
+ async for sse in self.arequest_stream(
656
+ self._prepare_url(ENDPOINT_RUN_WORKFLOWS),
657
+ HTTPMethod.POST,
658
+ json=req.model_dump(),
659
+ **kwargs):
660
+ yield models.build_workflows_stream_response(sse.json())
661
+
662
+ async def astop_workflows(self, task_id: str, req: models.StopRequest, **kwargs) -> models.StopResponse:
663
+ """
664
+ Sends a request to stop a streaming workflow task.
665
+
666
+ Returns:
667
+ A `StopResponse` object indicating the success of the operation.
668
+ """
669
+ return await self._astop_stream(self._prepare_url(ENDPOINT_STOP_WORKFLOWS, task_id=task_id), req, **kwargs)
670
+
671
+ async def _astop_stream(self, endpoint: str, req: models.StopRequest, **kwargs) -> models.StopResponse:
672
+ response = await self.arequest(
673
+ endpoint,
674
+ HTTPMethod.POST,
675
+ json=req.model_dump(),
676
+ **kwargs,
677
+ )
678
+ return models.StopResponse(**response.json())
679
+
680
+ def _prepare_url(self, endpoint: str, **kwargs) -> str:
681
+ return self.api_base + endpoint.format(**kwargs)
682
+
683
+ def _prepare_auth_headers(self, headers: Dict[str, str]):
684
+ if "authorization" not in (key.lower() for key in headers.keys()):
685
+ headers["Authorization"] = f"Bearer {self.api_key}"
686
+
687
+
688
+ def _get_content_type(headers: httpx.Headers) -> str:
689
+ return headers.get("content-type", "").partition(";")[0]
690
+
691
+
692
+ def _check_stream_content_type(response: httpx.Response) -> bool:
693
+ content_type = _get_content_type(response.headers)
694
+ return response.is_success and "text/event-stream" in content_type
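The synchronous `Client` mirrors the async API without the `a` prefix; streaming mode yields typed events from a plain generator. A sketch with placeholder credentials:

from dify_client import Client, models

client = Client(api_key="your-api-key")

req = models.CompletionRequest(
    inputs=models.CompletionInputs(query="Write a haiku about autumn"),
    response_mode=models.ResponseMode.STREAMING,
    user="user-123",
)
# completion_messages returns an iterator of typed stream events here.
for event in client.completion_messages(req):
    if isinstance(event, models.MessageStreamResponse):
        print(event.answer, end="")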
dify_client_python/dify_client/errors.py ADDED
@@ -0,0 +1,134 @@
1
+ from http import HTTPStatus
2
+ from typing import Union
3
+
4
+ import httpx
+ import httpx_sse
5
+ from pydantic import BaseModel
6
+
7
+ from dify_client_python.dify_client import models
8
+
9
+
10
+ class DifyAPIError(Exception):
11
+ def __init__(self, status: int, code: str, message: str):
12
+ super().__init__(f"status_code={status}, code={code}, {message}")
13
+ self.status = status
14
+ self.code = code
15
+ self.message = message
16
+
17
+
18
+ class DifyInvalidParam(DifyAPIError):
19
+ pass
20
+
21
+
22
+ class DifyNotChatApp(DifyAPIError):
23
+ pass
24
+
25
+
26
+ class DifyResourceNotFound(DifyAPIError):
27
+ pass
28
+
29
+
30
+ class DifyAppUnavailable(DifyAPIError):
31
+ pass
32
+
33
+
34
+ class DifyProviderNotInitialize(DifyAPIError):
35
+ pass
36
+
37
+
38
+ class DifyProviderQuotaExceeded(DifyAPIError):
39
+ pass
40
+
41
+
42
+ class DifyModelCurrentlyNotSupport(DifyAPIError):
43
+ pass
44
+
45
+
46
+ class DifyCompletionRequestError(DifyAPIError):
47
+ pass
48
+
49
+
50
+ class DifyInternalServerError(DifyAPIError):
51
+ pass
52
+
53
+
54
+ class DifyNoFileUploaded(DifyAPIError):
55
+ pass
56
+
57
+
58
+ class DifyTooManyFiles(DifyAPIError):
59
+ pass
60
+
61
+
62
+ class DifyUnsupportedPreview(DifyAPIError):
63
+ pass
64
+
65
+
66
+ class DifyUnsupportedEstimate(DifyAPIError):
67
+ pass
68
+
69
+
70
+ class DifyFileTooLarge(DifyAPIError):
71
+ pass
72
+
73
+
74
+ class DifyUnsupportedFileType(DifyAPIError):
75
+ pass
76
+
77
+
78
+ class DifyS3ConnectionFailed(DifyAPIError):
79
+ pass
80
+
81
+
82
+ class DifyS3PermissionDenied(DifyAPIError):
83
+ pass
84
+
85
+
86
+ class DifyS3FileTooLarge(DifyAPIError):
87
+ pass
88
+
89
+
90
+ SPEC_CODE_ERRORS = {
91
+ # completion & chat & workflow
92
+ "invalid_param": DifyInvalidParam,
93
+ "not_chat_app": DifyNotChatApp,
94
+ "app_unavailable": DifyAppUnavailable,
95
+ "provider_not_initialize": DifyProviderNotInitialize,
96
+ "provider_quota_exceeded": DifyProviderQuotaExceeded,
97
+ "model_currently_not_support": DifyModelCurrentlyNotSupport,
98
+ "completion_request_error": DifyCompletionRequestError,
99
+ # files upload
100
+ "no_file_uploaded": DifyNoFileUploaded,
101
+ "too_many_files": DifyTooManyFiles,
102
+ "unsupported_preview": DifyUnsupportedPreview,
103
+ "unsupported_estimate": DifyUnsupportedEstimate,
104
+ "file_too_large": DifyFileTooLarge,
105
+ "unsupported_file_type": DifyUnsupportedFileType,
106
+ "s3_connection_failed": DifyS3ConnectionFailed,
107
+ "s3_permission_denied": DifyS3PermissionDenied,
108
+ "s3_file_too_large": DifyS3FileTooLarge,
109
+ }
110
+
111
+
112
+ def raise_for_status(response: Union[httpx.Response, httpx_sse.ServerSentEvent]):
113
+ if isinstance(response, httpx.Response):
114
+ if response.is_success:
115
+ return
116
+ json = response.json()
117
+ if "status" not in json:
118
+ json["status"] = response.status_code
119
+ details = models.ErrorResponse(**json)
120
+ elif isinstance(response, httpx_sse.ServerSentEvent):
121
+ if response.event != models.StreamEvent.ERROR.value:
122
+ return
123
+ details = models.ErrorStreamResponse(**response.json())  # parse the SSE data payload
124
+ else:
125
+ raise ValueError(f"Invalid dify response type: {type(response)}")
126
+
127
+ if details.status == HTTPStatus.NOT_FOUND:
128
+ raise DifyResourceNotFound(details.status, details.code, details.message)
129
+ elif details.status == HTTPStatus.INTERNAL_SERVER_ERROR:
130
+ raise DifyInternalServerError(details.status, details.code, details.message)
131
+ else:
132
+ raise SPEC_CODE_ERRORS.get(details.code, DifyAPIError)(
133
+ details.status, details.code, details.message
134
+ )
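A minimal usage sketch for the helpers above, assuming a placeholder endpoint and API key (neither is part of this commit): raise_for_status is a no-op on 2xx responses and otherwise raises the DifyAPIError subclass mapped from the spec error code.

import httpx

from dify_client_python.dify_client.errors import DifyAPIError, raise_for_status

# Placeholder endpoint and bearer token, for illustration only.
resp = httpx.post(
    "https://api.dify.ai/v1/completion-messages",
    headers={"Authorization": "Bearer app-xxx"},
    json={"inputs": {"query": "Hello"}, "response_mode": "blocking", "user": "user-1"},
)
try:
    raise_for_status(resp)  # returns silently on success; raises on 4xx/5xx
except DifyAPIError as exc:
    print(exc.status, exc.code, exc.message)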
dify_client_python/dify_client/models/__init__.py ADDED
@@ -0,0 +1,7 @@
+ from .chat import *
+ from .completion import *
+ from .feedback import *
+ from .file import *
+ from .workflow import *
+ from .stream import *
+ from .base import StopRequest, StopResponse
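Because the package re-exports its submodules, every model is reachable through the single models namespace, which is how errors.py above consumes it. A quick sketch:

from dify_client_python.dify_client import models

req = models.ChatRequest(query="Hi", response_mode=models.ResponseMode.BLOCKING, user="user-1")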
dify_client_python/dify_client/models/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (357 Bytes)
dify_client_python/dify_client/models/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (372 Bytes)
dify_client_python/dify_client/models/__pycache__/base.cpython-310.pyc ADDED
Binary file (3.17 kB)
dify_client_python/dify_client/models/__pycache__/base.cpython-312.pyc ADDED
Binary file (3.77 kB)
dify_client_python/dify_client/models/__pycache__/chat.cpython-310.pyc ADDED
Binary file (1.49 kB)
dify_client_python/dify_client/models/__pycache__/chat.cpython-312.pyc ADDED
Binary file (1.67 kB)
dify_client_python/dify_client/models/__pycache__/completion.cpython-310.pyc ADDED
Binary file (1.09 kB)
dify_client_python/dify_client/models/__pycache__/completion.cpython-312.pyc ADDED
Binary file (1.21 kB)
dify_client_python/dify_client/models/__pycache__/feedback.cpython-310.pyc ADDED
Binary file (955 Bytes)
dify_client_python/dify_client/models/__pycache__/feedback.cpython-312.pyc ADDED
Binary file (1.07 kB)
dify_client_python/dify_client/models/__pycache__/file.cpython-310.pyc ADDED
Binary file (716 Bytes)
dify_client_python/dify_client/models/__pycache__/file.cpython-312.pyc ADDED
Binary file (784 Bytes)
dify_client_python/dify_client/models/__pycache__/stream.cpython-310.pyc ADDED
Binary file (5.4 kB)
dify_client_python/dify_client/models/__pycache__/stream.cpython-312.pyc ADDED
Binary file (7.46 kB)
dify_client_python/dify_client/models/__pycache__/workflow.cpython-310.pyc ADDED
Binary file (3.23 kB)
dify_client_python/dify_client/models/__pycache__/workflow.cpython-312.pyc ADDED
Binary file (3.92 kB)
dify_client_python/dify_client/models/base.py ADDED
@@ -0,0 +1,93 @@
+ try:
+     from enum import StrEnum
+ except ImportError:
+     from strenum import StrEnum
+ from http import HTTPStatus
+ from typing import Optional, List
+
+ from pydantic import BaseModel, ConfigDict
+
+
+ class Mode(StrEnum):
+     CHAT = "chat"
+     COMPLETION = "completion"
+
+
+ class ResponseMode(StrEnum):
+     STREAMING = 'streaming'
+     BLOCKING = 'blocking'
+
+
+ class FileType(StrEnum):
+     IMAGE = "image"
+
+
+ class TransferMethod(StrEnum):
+     REMOTE_URL = "remote_url"
+     LOCAL_FILE = "local_file"
+
+
+ # Allows the entry of various variable values defined by the App.
+ # The inputs parameter contains multiple key/value pairs, with each key corresponding to a specific variable and
+ # each value being the specific value for that variable.
+ # The text generation application requires at least one key/value pair to be provided.
+ class CompletionInputs(BaseModel):
+     model_config = ConfigDict(extra='allow')
+     # Required. The input text, the content to be processed.
+     query: str
+
+
+ class File(BaseModel):
+     type: FileType
+     transfer_method: TransferMethod
+     url: Optional[str]
+     # Uploaded file ID, which must be obtained by uploading through the File Upload API in advance
+     # (when the transfer method is local_file)
+     upload_file_id: Optional[str]
+
+
+ class Usage(BaseModel):
+     prompt_tokens: int
+     completion_tokens: int
+     total_tokens: int
+
+     prompt_unit_price: str
+     prompt_price_unit: str
+     prompt_price: str
+     completion_unit_price: str
+     completion_price_unit: str
+     completion_price: str
+     total_price: str
+     currency: str
+
+     latency: float
+
+
+ class RetrieverResource(BaseModel):
+     position: int
+     dataset_id: str
+     dataset_name: str
+     document_id: str
+     document_name: str
+     segment_id: str
+     score: float
+     content: str
+
+
+ class Metadata(BaseModel):
+     usage: Usage
+     retriever_resources: List[RetrieverResource] = []
+
+
+ class StopRequest(BaseModel):
+     user: str
+
+
+ class StopResponse(BaseModel):
+     result: str  # success
+
+
+ class ErrorResponse(BaseModel):
+     status: int = HTTPStatus.INTERNAL_SERVER_ERROR  # HTTP status code
+     code: str = ""
+     message: str = ""
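A short sketch of the base models in use, with illustrative values. Note that File declares url and upload_file_id as Optional but without defaults, so under pydantic v2 both keys must still be passed explicitly:

from dify_client_python.dify_client.models.base import (
    CompletionInputs,
    File,
    FileType,
    TransferMethod,
)

# extra='allow' lets apps attach arbitrary variables alongside the required query
inputs = CompletionInputs(query="Summarize this text", tone="formal")

image = File(
    type=FileType.IMAGE,
    transfer_method=TransferMethod.REMOTE_URL,
    url="https://example.com/cat.png",  # placeholder URL
    upload_file_id=None,  # key is required even when unused, since no default is declared
)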
dify_client_python/dify_client/models/chat.py ADDED
@@ -0,0 +1,29 @@
+ from typing import Dict, List, Optional, Any
+
+ from pydantic import BaseModel, Field
+
+ from dify_client_python.dify_client.models.base import ResponseMode, File
+ from dify_client_python.dify_client.models.completion import CompletionResponse
+
+
+ class ChatRequest(BaseModel):
+     query: str
+     inputs: Dict[str, Any] = Field(default_factory=dict)
+     response_mode: ResponseMode
+     user: str
+     conversation_id: Optional[str] = ""
+     files: List[File] = []
+     auto_generate_name: bool = True
+
+
+ class ChatResponse(CompletionResponse):
+     pass
+
+
+ class ChatSuggestRequest(BaseModel):
+     user: str
+
+
+ class ChatSuggestResponse(BaseModel):
+     result: str
+     data: List[str] = []
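A hedged construction sketch for ChatRequest (identifiers are placeholders): defaults cover inputs, files, and auto_generate_name, so only query, response_mode, and user are mandatory.

from dify_client_python.dify_client.models.base import ResponseMode
from dify_client_python.dify_client.models.chat import ChatRequest

req = ChatRequest(
    query="What can you do?",
    response_mode=ResponseMode.STREAMING,
    user="user-1",       # placeholder end-user identifier
    conversation_id="",  # empty string starts a new conversation
)
payload = req.model_dump()  # pydantic v2; use .dict() on pydantic v1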