# dify/docker/docker-compose.yaml
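# Every value below uses the `${VAR:-default}` form, so any setting can be overridden from
# a .env file (or the shell environment) next to this compose file without editing the YAML.
# The `x-shared-env` anchor defined next is merged into the api and worker services via
# `<<: *shared-api-worker-env`.
# Illustrative .env override (values are examples only):
#   LOG_LEVEL=DEBUG
#   DB_PASSWORD=change-me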
x-shared-env: &shared-api-worker-env
WORKFLOW_FILE_UPLOAD_LIMIT: ${WORKFLOW_FILE_UPLOAD_LIMIT:-10}
LOG_LEVEL: ${LOG_LEVEL:-INFO}
LOG_FILE: ${LOG_FILE:-}
LOG_FILE_MAX_SIZE: ${LOG_FILE_MAX_SIZE:-20}
LOG_FILE_BACKUP_COUNT: ${LOG_FILE_BACKUP_COUNT:-5}
DEBUG: ${DEBUG:-false}
FLASK_DEBUG: ${FLASK_DEBUG:-false}
SECRET_KEY: ${SECRET_KEY:-sk-9f73s3ljTXVcMT3Blb3ljTqtsKiGHXVcMT3BlbkFJLK7U}
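# For production deployments, replace the default SECRET_KEY above. A strong key can be
# generated the same way as the sandbox key further down, e.g. `openssl rand -base64 42`,
# and supplied via SECRET_KEY=... in .env.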
INIT_PASSWORD: ${INIT_PASSWORD:-}
CONSOLE_WEB_URL: ${CONSOLE_WEB_URL:-}
CONSOLE_API_URL: ${CONSOLE_API_URL:-}
SERVICE_API_URL: ${SERVICE_API_URL:-}
APP_WEB_URL: ${APP_WEB_URL:-}
CHECK_UPDATE_URL: ${CHECK_UPDATE_URL:-https://updates.dify.ai}
OPENAI_API_BASE: ${OPENAI_API_BASE:-https://api.openai.com/v1}
FILES_URL: ${FILES_URL:-}
FILES_ACCESS_TIMEOUT: ${FILES_ACCESS_TIMEOUT:-300}
APP_MAX_ACTIVE_REQUESTS: ${APP_MAX_ACTIVE_REQUESTS:-0}
MIGRATION_ENABLED: ${MIGRATION_ENABLED:-true}
DEPLOY_ENV: ${DEPLOY_ENV:-PRODUCTION}
DIFY_BIND_ADDRESS: ${DIFY_BIND_ADDRESS:-0.0.0.0}
DIFY_PORT: ${DIFY_PORT:-5001}
SERVER_WORKER_AMOUNT: ${SERVER_WORKER_AMOUNT:-}
SERVER_WORKER_CLASS: ${SERVER_WORKER_CLASS:-}
CELERY_WORKER_CLASS: ${CELERY_WORKER_CLASS:-}
GUNICORN_TIMEOUT: ${GUNICORN_TIMEOUT:-360}
CELERY_WORKER_AMOUNT: ${CELERY_WORKER_AMOUNT:-}
CELERY_AUTO_SCALE: ${CELERY_AUTO_SCALE:-false}
CELERY_MAX_WORKERS: ${CELERY_MAX_WORKERS:-}
CELERY_MIN_WORKERS: ${CELERY_MIN_WORKERS:-}
API_TOOL_DEFAULT_CONNECT_TIMEOUT: ${API_TOOL_DEFAULT_CONNECT_TIMEOUT:-10}
API_TOOL_DEFAULT_READ_TIMEOUT: ${API_TOOL_DEFAULT_READ_TIMEOUT:-60}
DB_USERNAME: ${DB_USERNAME:-postgres}
DB_PASSWORD: ${DB_PASSWORD:-difyai123456}
DB_HOST: ${DB_HOST:-db}
DB_PORT: ${DB_PORT:-5432}
DB_DATABASE: ${DB_DATABASE:-dify}
SQLALCHEMY_POOL_SIZE: ${SQLALCHEMY_POOL_SIZE:-30}
SQLALCHEMY_POOL_RECYCLE: ${SQLALCHEMY_POOL_RECYCLE:-3600}
SQLALCHEMY_ECHO: ${SQLALCHEMY_ECHO:-false}
REDIS_HOST: ${REDIS_HOST:-redis}
REDIS_PORT: ${REDIS_PORT:-6379}
REDIS_USERNAME: ${REDIS_USERNAME:-}
REDIS_PASSWORD: ${REDIS_PASSWORD:-difyai123456}
REDIS_USE_SSL: ${REDIS_USE_SSL:-false}
REDIS_DB: ${REDIS_DB:-0}
REDIS_USE_SENTINEL: ${REDIS_USE_SENTINEL:-false}
REDIS_SENTINELS: ${REDIS_SENTINELS:-}
REDIS_SENTINEL_SERVICE_NAME: ${REDIS_SENTINEL_SERVICE_NAME:-}
REDIS_SENTINEL_USERNAME: ${REDIS_SENTINEL_USERNAME:-}
REDIS_SENTINEL_PASSWORD: ${REDIS_SENTINEL_PASSWORD:-}
REDIS_SENTINEL_SOCKET_TIMEOUT: ${REDIS_SENTINEL_SOCKET_TIMEOUT:-0.1}
ACCESS_TOKEN_EXPIRE_MINUTES: ${ACCESS_TOKEN_EXPIRE_MINUTES:-60}
CELERY_BROKER_URL: ${CELERY_BROKER_URL:-redis://:difyai123456@redis:6379/1}
BROKER_USE_SSL: ${BROKER_USE_SSL:-false}
CELERY_USE_SENTINEL: ${CELERY_USE_SENTINEL:-false}
CELERY_SENTINEL_MASTER_NAME: ${CELERY_SENTINEL_MASTER_NAME:-}
CELERY_SENTINEL_SOCKET_TIMEOUT: ${CELERY_SENTINEL_SOCKET_TIMEOUT:-0.1}
WEB_API_CORS_ALLOW_ORIGINS: ${WEB_API_CORS_ALLOW_ORIGINS:-*}
CONSOLE_CORS_ALLOW_ORIGINS: ${CONSOLE_CORS_ALLOW_ORIGINS:-*}
STORAGE_TYPE: ${STORAGE_TYPE:-local}
STORAGE_LOCAL_PATH: ${STORAGE_LOCAL_PATH:-storage}
S3_USE_AWS_MANAGED_IAM: ${S3_USE_AWS_MANAGED_IAM:-false}
S3_ENDPOINT: ${S3_ENDPOINT:-}
S3_BUCKET_NAME: ${S3_BUCKET_NAME:-}
S3_ACCESS_KEY: ${S3_ACCESS_KEY:-}
S3_SECRET_KEY: ${S3_SECRET_KEY:-}
S3_REGION: ${S3_REGION:-us-east-1}
AZURE_BLOB_ACCOUNT_NAME: ${AZURE_BLOB_ACCOUNT_NAME:-}
AZURE_BLOB_ACCOUNT_KEY: ${AZURE_BLOB_ACCOUNT_KEY:-}
AZURE_BLOB_CONTAINER_NAME: ${AZURE_BLOB_CONTAINER_NAME:-}
AZURE_BLOB_ACCOUNT_URL: ${AZURE_BLOB_ACCOUNT_URL:-}
GOOGLE_STORAGE_BUCKET_NAME: ${GOOGLE_STORAGE_BUCKET_NAME:-}
GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64: ${GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64:-}
ALIYUN_OSS_BUCKET_NAME: ${ALIYUN_OSS_BUCKET_NAME:-}
ALIYUN_OSS_ACCESS_KEY: ${ALIYUN_OSS_ACCESS_KEY:-}
ALIYUN_OSS_SECRET_KEY: ${ALIYUN_OSS_SECRET_KEY:-}
ALIYUN_OSS_ENDPOINT: ${ALIYUN_OSS_ENDPOINT:-}
ALIYUN_OSS_REGION: ${ALIYUN_OSS_REGION:-}
ALIYUN_OSS_AUTH_VERSION: ${ALIYUN_OSS_AUTH_VERSION:-v4}
ALIYUN_OSS_PATH: ${ALIYUN_OSS_PATH:-}
TENCENT_COS_BUCKET_NAME: ${TENCENT_COS_BUCKET_NAME:-}
TENCENT_COS_SECRET_KEY: ${TENCENT_COS_SECRET_KEY:-}
TENCENT_COS_SECRET_ID: ${TENCENT_COS_SECRET_ID:-}
TENCENT_COS_REGION: ${TENCENT_COS_REGION:-}
TENCENT_COS_SCHEME: ${TENCENT_COS_SCHEME:-}
HUAWEI_OBS_BUCKET_NAME: ${HUAWEI_OBS_BUCKET_NAME:-}
HUAWEI_OBS_SECRET_KEY: ${HUAWEI_OBS_SECRET_KEY:-}
HUAWEI_OBS_ACCESS_KEY: ${HUAWEI_OBS_ACCESS_KEY:-}
HUAWEI_OBS_SERVER: ${HUAWEI_OBS_SERVER:-}
OCI_ENDPOINT: ${OCI_ENDPOINT:-}
OCI_BUCKET_NAME: ${OCI_BUCKET_NAME:-}
OCI_ACCESS_KEY: ${OCI_ACCESS_KEY:-}
OCI_SECRET_KEY: ${OCI_SECRET_KEY:-}
OCI_REGION: ${OCI_REGION:-}
VOLCENGINE_TOS_BUCKET_NAME: ${VOLCENGINE_TOS_BUCKET_NAME:-}
VOLCENGINE_TOS_SECRET_KEY: ${VOLCENGINE_TOS_SECRET_KEY:-}
VOLCENGINE_TOS_ACCESS_KEY: ${VOLCENGINE_TOS_ACCESS_KEY:-}
VOLCENGINE_TOS_ENDPOINT: ${VOLCENGINE_TOS_ENDPOINT:-}
VOLCENGINE_TOS_REGION: ${VOLCENGINE_TOS_REGION:-}
BAIDU_OBS_BUCKET_NAME: ${BAIDU_OBS_BUCKET_NAME:-}
BAIDU_OBS_SECRET_KEY: ${BAIDU_OBS_SECRET_KEY:-}
BAIDU_OBS_ACCESS_KEY: ${BAIDU_OBS_ACCESS_KEY:-}
BAIDU_OBS_ENDPOINT: ${BAIDU_OBS_ENDPOINT:-}
VECTOR_STORE: ${VECTOR_STORE:-weaviate}
WEAVIATE_ENDPOINT: ${WEAVIATE_ENDPOINT:-http://weaviate:8080}
WEAVIATE_API_KEY: ${WEAVIATE_API_KEY:-WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih}
QDRANT_URL: ${QDRANT_URL:-http://qdrant:6333}
QDRANT_API_KEY: ${QDRANT_API_KEY:-difyai123456}
QDRANT_CLIENT_TIMEOUT: ${QDRANT_CLIENT_TIMEOUT:-20}
QDRANT_GRPC_ENABLED: ${QDRANT_GRPC_ENABLED:-false}
QDRANT_GRPC_PORT: ${QDRANT_GRPC_PORT:-6334}
COUCHBASE_CONNECTION_STRING: ${COUCHBASE_CONNECTION_STRING:-'couchbase-server'}
COUCHBASE_USER: ${COUCHBASE_USER:-Administrator}
COUCHBASE_PASSWORD: ${COUCHBASE_PASSWORD:-password}
COUCHBASE_BUCKET_NAME: ${COUCHBASE_BUCKET_NAME:-Embeddings}
COUCHBASE_SCOPE_NAME: ${COUCHBASE_SCOPE_NAME:-_default}
MILVUS_URI: ${MILVUS_URI:-http://127.0.0.1:19530}
MILVUS_TOKEN: ${MILVUS_TOKEN:-}
MILVUS_USER: ${MILVUS_USER:-root}
MILVUS_PASSWORD: ${MILVUS_PASSWORD:-Milvus}
MYSCALE_HOST: ${MYSCALE_HOST:-myscale}
MYSCALE_PORT: ${MYSCALE_PORT:-8123}
MYSCALE_USER: ${MYSCALE_USER:-default}
MYSCALE_PASSWORD: ${MYSCALE_PASSWORD:-}
MYSCALE_DATABASE: ${MYSCALE_DATABASE:-dify}
MYSCALE_FTS_PARAMS: ${MYSCALE_FTS_PARAMS:-}
RELYT_HOST: ${RELYT_HOST:-db}
RELYT_PORT: ${RELYT_PORT:-5432}
RELYT_USER: ${RELYT_USER:-postgres}
RELYT_PASSWORD: ${RELYT_PASSWORD:-difyai123456}
RELYT_DATABASE: ${RELYT_DATABASE:-postgres}
PGVECTOR_HOST: ${PGVECTOR_HOST:-pgvector}
PGVECTOR_PORT: ${PGVECTOR_PORT:-5432}
PGVECTOR_USER: ${PGVECTOR_USER:-postgres}
PGVECTOR_PASSWORD: ${PGVECTOR_PASSWORD:-difyai123456}
PGVECTOR_DATABASE: ${PGVECTOR_DATABASE:-dify}
TIDB_VECTOR_HOST: ${TIDB_VECTOR_HOST:-tidb}
TIDB_VECTOR_PORT: ${TIDB_VECTOR_PORT:-4000}
TIDB_VECTOR_USER: ${TIDB_VECTOR_USER:-}
TIDB_VECTOR_PASSWORD: ${TIDB_VECTOR_PASSWORD:-}
TIDB_VECTOR_DATABASE: ${TIDB_VECTOR_DATABASE:-dify}
TIDB_ON_QDRANT_URL: ${TIDB_ON_QDRANT_URL:-http://127.0.0.1}
TIDB_ON_QDRANT_API_KEY: ${TIDB_ON_QDRANT_API_KEY:-dify}
TIDB_ON_QDRANT_CLIENT_TIMEOUT: ${TIDB_ON_QDRANT_CLIENT_TIMEOUT:-20}
TIDB_ON_QDRANT_GRPC_ENABLED: ${TIDB_ON_QDRANT_GRPC_ENABLED:-false}
TIDB_ON_QDRANT_GRPC_PORT: ${TIDB_ON_QDRANT_GRPC_PORT:-6334}
TIDB_PUBLIC_KEY: ${TIDB_PUBLIC_KEY:-dify}
TIDB_PRIVATE_KEY: ${TIDB_PRIVATE_KEY:-dify}
TIDB_API_URL: ${TIDB_API_URL:-http://127.0.0.1}
TIDB_IAM_API_URL: ${TIDB_IAM_API_URL:-http://127.0.0.1}
TIDB_REGION: ${TIDB_REGION:-regions/aws-us-east-1}
TIDB_PROJECT_ID: ${TIDB_PROJECT_ID:-dify}
TIDB_SPEND_LIMIT: ${TIDB_SPEND_LIMIT:-100}
ORACLE_HOST: ${ORACLE_HOST:-oracle}
ORACLE_PORT: ${ORACLE_PORT:-1521}
ORACLE_USER: ${ORACLE_USER:-dify}
ORACLE_PASSWORD: ${ORACLE_PASSWORD:-dify}
ORACLE_DATABASE: ${ORACLE_DATABASE:-FREEPDB1}
CHROMA_HOST: ${CHROMA_HOST:-127.0.0.1}
CHROMA_PORT: ${CHROMA_PORT:-8000}
CHROMA_TENANT: ${CHROMA_TENANT:-default_tenant}
CHROMA_DATABASE: ${CHROMA_DATABASE:-default_database}
CHROMA_AUTH_PROVIDER: ${CHROMA_AUTH_PROVIDER:-chromadb.auth.token_authn.TokenAuthClientProvider}
CHROMA_AUTH_CREDENTIALS: ${CHROMA_AUTH_CREDENTIALS:-}
ELASTICSEARCH_HOST: ${ELASTICSEARCH_HOST:-0.0.0.0}
ELASTICSEARCH_PORT: ${ELASTICSEARCH_PORT:-9200}
ELASTICSEARCH_USERNAME: ${ELASTICSEARCH_USERNAME:-elastic}
ELASTICSEARCH_PASSWORD: ${ELASTICSEARCH_PASSWORD:-elastic}
LINDORM_URL: ${LINDORM_URL:-http://lindorm:30070}
LINDORM_USERNAME: ${LINDORM_USERNAME:-lindorm}
LINDORM_PASSWORD: ${LINDORM_PASSWORD:-lindorm}
KIBANA_PORT: ${KIBANA_PORT:-5601}
# AnalyticDB configuration
ANALYTICDB_KEY_ID: ${ANALYTICDB_KEY_ID:-}
ANALYTICDB_KEY_SECRET: ${ANALYTICDB_KEY_SECRET:-}
ANALYTICDB_REGION_ID: ${ANALYTICDB_REGION_ID:-}
ANALYTICDB_INSTANCE_ID: ${ANALYTICDB_INSTANCE_ID:-}
ANALYTICDB_ACCOUNT: ${ANALYTICDB_ACCOUNT:-}
ANALYTICDB_PASSWORD: ${ANALYTICDB_PASSWORD:-}
ANALYTICDB_NAMESPACE: ${ANALYTICDB_NAMESPACE:-dify}
ANALYTICDB_NAMESPACE_PASSWORD: ${ANALYTICDB_NAMESPACE_PASSWORD:-}
OPENSEARCH_HOST: ${OPENSEARCH_HOST:-opensearch}
OPENSEARCH_PORT: ${OPENSEARCH_PORT:-9200}
OPENSEARCH_USER: ${OPENSEARCH_USER:-admin}
OPENSEARCH_PASSWORD: ${OPENSEARCH_PASSWORD:-admin}
OPENSEARCH_SECURE: ${OPENSEARCH_SECURE:-true}
TENCENT_VECTOR_DB_URL: ${TENCENT_VECTOR_DB_URL:-http://127.0.0.1}
TENCENT_VECTOR_DB_API_KEY: ${TENCENT_VECTOR_DB_API_KEY:-dify}
TENCENT_VECTOR_DB_TIMEOUT: ${TENCENT_VECTOR_DB_TIMEOUT:-30}
TENCENT_VECTOR_DB_USERNAME: ${TENCENT_VECTOR_DB_USERNAME:-dify}
TENCENT_VECTOR_DB_DATABASE: ${TENCENT_VECTOR_DB_DATABASE:-dify}
TENCENT_VECTOR_DB_SHARD: ${TENCENT_VECTOR_DB_SHARD:-1}
TENCENT_VECTOR_DB_REPLICAS: ${TENCENT_VECTOR_DB_REPLICAS:-2}
BAIDU_VECTOR_DB_ENDPOINT: ${BAIDU_VECTOR_DB_ENDPOINT:-http://127.0.0.1:5287}
BAIDU_VECTOR_DB_CONNECTION_TIMEOUT_MS: ${BAIDU_VECTOR_DB_CONNECTION_TIMEOUT_MS:-30000}
BAIDU_VECTOR_DB_ACCOUNT: ${BAIDU_VECTOR_DB_ACCOUNT:-root}
BAIDU_VECTOR_DB_API_KEY: ${BAIDU_VECTOR_DB_API_KEY:-dify}
BAIDU_VECTOR_DB_DATABASE: ${BAIDU_VECTOR_DB_DATABASE:-dify}
BAIDU_VECTOR_DB_SHARD: ${BAIDU_VECTOR_DB_SHARD:-1}
BAIDU_VECTOR_DB_REPLICAS: ${BAIDU_VECTOR_DB_REPLICAS:-3}
VIKINGDB_ACCESS_KEY: ${VIKINGDB_ACCESS_KEY:-dify}
VIKINGDB_SECRET_KEY: ${VIKINGDB_SECRET_KEY:-dify}
VIKINGDB_REGION: ${VIKINGDB_REGION:-cn-shanghai}
VIKINGDB_HOST: ${VIKINGDB_HOST:-api-vikingdb.xxx.volces.com}
VIKINGDB_SCHEMA: ${VIKINGDB_SCHEMA:-http}
UPSTASH_VECTOR_URL: ${UPSTASH_VECTOR_URL:-https://xxx-vector.upstash.io}
UPSTASH_VECTOR_TOKEN: ${UPSTASH_VECTOR_TOKEN:-dify}
UPLOAD_FILE_SIZE_LIMIT: ${UPLOAD_FILE_SIZE_LIMIT:-15}
UPLOAD_FILE_BATCH_LIMIT: ${UPLOAD_FILE_BATCH_LIMIT:-5}
ETL_TYPE: ${ETL_TYPE:-dify}
UNSTRUCTURED_API_URL: ${UNSTRUCTURED_API_URL:-}
PROMPT_GENERATION_MAX_TOKENS: ${PROMPT_GENERATION_MAX_TOKENS:-512}
CODE_GENERATION_MAX_TOKENS: ${CODE_GENERATION_MAX_TOKENS:-1024}
MULTIMODAL_SEND_IMAGE_FORMAT: ${MULTIMODAL_SEND_IMAGE_FORMAT:-base64}
UPLOAD_IMAGE_FILE_SIZE_LIMIT: ${UPLOAD_IMAGE_FILE_SIZE_LIMIT:-10}
UPLOAD_VIDEO_FILE_SIZE_LIMIT: ${UPLOAD_VIDEO_FILE_SIZE_LIMIT:-100}
UPLOAD_AUDIO_FILE_SIZE_LIMIT: ${UPLOAD_AUDIO_FILE_SIZE_LIMIT:-50}
SENTRY_DSN: ${API_SENTRY_DSN:-}
SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0}
SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0}
NOTION_INTEGRATION_TYPE: ${NOTION_INTEGRATION_TYPE:-public}
NOTION_CLIENT_SECRET: ${NOTION_CLIENT_SECRET:-}
NOTION_CLIENT_ID: ${NOTION_CLIENT_ID:-}
NOTION_INTERNAL_SECRET: ${NOTION_INTERNAL_SECRET:-}
MAIL_TYPE: ${MAIL_TYPE:-resend}
MAIL_DEFAULT_SEND_FROM: ${MAIL_DEFAULT_SEND_FROM:-}
SMTP_SERVER: ${SMTP_SERVER:-}
SMTP_PORT: ${SMTP_PORT:-465}
SMTP_USERNAME: ${SMTP_USERNAME:-}
SMTP_PASSWORD: ${SMTP_PASSWORD:-}
SMTP_USE_TLS: ${SMTP_USE_TLS:-true}
SMTP_OPPORTUNISTIC_TLS: ${SMTP_OPPORTUNISTIC_TLS:-false}
RESEND_API_KEY: ${RESEND_API_KEY:-your-resend-api-key}
RESEND_API_URL: ${RESEND_API_URL:-https://api.resend.com}
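# Mail is sent through Resend by default (MAIL_TYPE=resend). To use a plain SMTP server
# instead, a minimal .env sketch (placeholder values, not defaults) would be:
#   MAIL_TYPE=smtp
#   MAIL_DEFAULT_SEND_FROM=no-reply@example.com
#   SMTP_SERVER=smtp.example.com
#   SMTP_PORT=465
#   SMTP_USERNAME=user
#   SMTP_PASSWORD=pass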
INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH: ${INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH:-1000}
INVITE_EXPIRY_HOURS: ${INVITE_EXPIRY_HOURS:-72}
RESET_PASSWORD_TOKEN_EXPIRY_MINUTES: ${RESET_PASSWORD_TOKEN_EXPIRY_MINUTES:-5}
CODE_EXECUTION_ENDPOINT: ${CODE_EXECUTION_ENDPOINT:-http://sandbox:8194}
CODE_EXECUTION_API_KEY: ${SANDBOX_API_KEY:-dify-sandbox}
CODE_MAX_NUMBER: ${CODE_MAX_NUMBER:-9223372036854775807}
CODE_MIN_NUMBER: ${CODE_MIN_NUMBER:--9223372036854775808}
CODE_MAX_DEPTH: ${CODE_MAX_DEPTH:-5}
CODE_MAX_PRECISION: ${CODE_MAX_PRECISION:-20}
CODE_MAX_STRING_LENGTH: ${CODE_MAX_STRING_LENGTH:-80000}
TEMPLATE_TRANSFORM_MAX_LENGTH: ${TEMPLATE_TRANSFORM_MAX_LENGTH:-80000}
CODE_MAX_STRING_ARRAY_LENGTH: ${CODE_MAX_STRING_ARRAY_LENGTH:-30}
CODE_MAX_OBJECT_ARRAY_LENGTH: ${CODE_MAX_OBJECT_ARRAY_LENGTH:-30}
CODE_MAX_NUMBER_ARRAY_LENGTH: ${CODE_MAX_NUMBER_ARRAY_LENGTH:-1000}
WORKFLOW_MAX_EXECUTION_STEPS: ${WORKFLOW_MAX_EXECUTION_STEPS:-500}
WORKFLOW_MAX_EXECUTION_TIME: ${WORKFLOW_MAX_EXECUTION_TIME:-1200}
WORKFLOW_CALL_MAX_DEPTH: ${WORKFLOW_CALL_MAX_DEPTH:-5}
SSRF_PROXY_HTTP_URL: ${SSRF_PROXY_HTTP_URL:-http://ssrf_proxy:3128}
SSRF_PROXY_HTTPS_URL: ${SSRF_PROXY_HTTPS_URL:-http://ssrf_proxy:3128}
HTTP_REQUEST_NODE_MAX_BINARY_SIZE: ${HTTP_REQUEST_NODE_MAX_BINARY_SIZE:-10485760}
HTTP_REQUEST_NODE_MAX_TEXT_SIZE: ${HTTP_REQUEST_NODE_MAX_TEXT_SIZE:-1048576}
APP_MAX_EXECUTION_TIME: ${APP_MAX_EXECUTION_TIME:-12000}
POSITION_TOOL_PINS: ${POSITION_TOOL_PINS:-}
POSITION_TOOL_INCLUDES: ${POSITION_TOOL_INCLUDES:-}
POSITION_TOOL_EXCLUDES: ${POSITION_TOOL_EXCLUDES:-}
POSITION_PROVIDER_PINS: ${POSITION_PROVIDER_PINS:-}
POSITION_PROVIDER_INCLUDES: ${POSITION_PROVIDER_INCLUDES:-}
POSITION_PROVIDER_EXCLUDES: ${POSITION_PROVIDER_EXCLUDES:-}
MAX_VARIABLE_SIZE: ${MAX_VARIABLE_SIZE:-204800}
OCEANBASE_VECTOR_HOST: ${OCEANBASE_VECTOR_HOST:-http://oceanbase-vector}
OCEANBASE_VECTOR_PORT: ${OCEANBASE_VECTOR_PORT:-2881}
OCEANBASE_VECTOR_USER: ${OCEANBASE_VECTOR_USER:-root@test}
OCEANBASE_VECTOR_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-""}
OCEANBASE_VECTOR_DATABASE: ${OCEANBASE_VECTOR_DATABASE:-test}
OCEANBASE_MEMORY_LIMIT: ${OCEANBASE_MEMORY_LIMIT:-6G}
services:
# API service
api:
image: langgenius/dify-api:0.11.0
restart: always
environment:
# Use the shared environment variables.
<<: *shared-api-worker-env
# Startup mode, 'api' starts the API server.
MODE: api
depends_on:
- db
- redis
volumes:
# Mount the storage directory to the container, for storing user files.
- ./volumes/app/storage:/app/api/storage
networks:
- ssrf_proxy_network
- default
# worker service
# The Celery worker for processing the queue.
worker:
image: langgenius/dify-api:0.11.0
restart: always
environment:
# Use the shared environment variables.
<<: *shared-api-worker-env
# Startup mode, 'worker' starts the Celery worker for processing the queue.
MODE: worker
depends_on:
- db
- redis
volumes:
# Mount the storage directory to the container, for storing user files.
- ./volumes/app/storage:/app/api/storage
networks:
- ssrf_proxy_network
- default
# Frontend web application.
web:
image: langgenius/dify-web:0.11.0
restart: always
environment:
CONSOLE_API_URL: ${CONSOLE_API_URL:-}
APP_API_URL: ${APP_API_URL:-}
SENTRY_DSN: ${WEB_SENTRY_DSN:-}
NEXT_TELEMETRY_DISABLED: ${NEXT_TELEMETRY_DISABLED:-0}
TEXT_GENERATION_TIMEOUT_MS: ${TEXT_GENERATION_TIMEOUT_MS:-60000}
CSP_WHITELIST: ${CSP_WHITELIST:-}
# The postgres database.
db:
image: postgres:15-alpine
restart: always
environment:
PGUSER: ${PGUSER:-postgres}
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-difyai123456}
POSTGRES_DB: ${POSTGRES_DB:-dify}
PGDATA: ${PGDATA:-/var/lib/postgresql/data/pgdata}
command: >
postgres -c 'max_connections=${POSTGRES_MAX_CONNECTIONS:-100}'
-c 'shared_buffers=${POSTGRES_SHARED_BUFFERS:-128MB}'
-c 'work_mem=${POSTGRES_WORK_MEM:-4MB}'
-c 'maintenance_work_mem=${POSTGRES_MAINTENANCE_WORK_MEM:-64MB}'
-c 'effective_cache_size=${POSTGRES_EFFECTIVE_CACHE_SIZE:-4096MB}'
volumes:
- ./volumes/db/data:/var/lib/postgresql/data
healthcheck:
test: ['CMD', 'pg_isready']
interval: 1s
timeout: 3s
retries: 30
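# The Postgres tuning flags above (max_connections, shared_buffers, work_mem,
# maintenance_work_mem, effective_cache_size) can also be overridden from .env, e.g.
# POSTGRES_SHARED_BUFFERS=512MB on hosts with more memory (illustrative value).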
# The redis cache.
redis:
image: redis:6-alpine
restart: always
volumes:
# Mount the redis data directory to the container.
- ./volumes/redis/data:/data
# Set the Redis password when starting the Redis server.
command: redis-server --requirepass ${REDIS_PASSWORD:-difyai123456}
healthcheck:
test: ['CMD', 'redis-cli', 'ping']
# The DifySandbox
sandbox:
image: langgenius/dify-sandbox:0.2.10
restart: always
environment:
# The DifySandbox configurations
# Make sure to change this key to a strong one for your deployment.
# You can generate a strong key using `openssl rand -base64 42`.
API_KEY: ${SANDBOX_API_KEY:-dify-sandbox}
GIN_MODE: ${SANDBOX_GIN_MODE:-release}
WORKER_TIMEOUT: ${SANDBOX_WORKER_TIMEOUT:-15}
ENABLE_NETWORK: ${SANDBOX_ENABLE_NETWORK:-true}
HTTP_PROXY: ${SANDBOX_HTTP_PROXY:-http://ssrf_proxy:3128}
HTTPS_PROXY: ${SANDBOX_HTTPS_PROXY:-http://ssrf_proxy:3128}
SANDBOX_PORT: ${SANDBOX_PORT:-8194}
volumes:
- ./volumes/sandbox/dependencies:/dependencies
healthcheck:
test: ['CMD', 'curl', '-f', 'http://localhost:8194/health']
networks:
- ssrf_proxy_network
# ssrf_proxy server
# for more information, please refer to
# https://docs.dify.ai/learn-more/faq/install-faq#id-18.-why-is-ssrf_proxy-needed
ssrf_proxy:
image: ubuntu/squid:latest
restart: always
volumes:
- ./ssrf_proxy/squid.conf.template:/etc/squid/squid.conf.template
- ./ssrf_proxy/docker-entrypoint.sh:/docker-entrypoint-mount.sh
entrypoint:
[
'sh',
'-c',
"cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh",
]
environment:
# Please modify the squid env vars below to fit your network environment.
HTTP_PORT: ${SSRF_HTTP_PORT:-3128}
COREDUMP_DIR: ${SSRF_COREDUMP_DIR:-/var/spool/squid}
REVERSE_PROXY_PORT: ${SSRF_REVERSE_PROXY_PORT:-8194}
SANDBOX_HOST: ${SSRF_SANDBOX_HOST:-sandbox}
SANDBOX_PORT: ${SANDBOX_PORT:-8194}
networks:
- ssrf_proxy_network
- default
# Certbot service
# use `docker-compose --profile certbot up` to start the certbot service.
certbot:
image: certbot/certbot
profiles:
- certbot
volumes:
- ./volumes/certbot/conf:/etc/letsencrypt
- ./volumes/certbot/www:/var/www/html
- ./volumes/certbot/logs:/var/log/letsencrypt
- ./volumes/certbot/conf/live:/etc/letsencrypt/live
- ./certbot/update-cert.template.txt:/update-cert.template.txt
- ./certbot/docker-entrypoint.sh:/docker-entrypoint.sh
environment:
- CERTBOT_EMAIL=${CERTBOT_EMAIL}
- CERTBOT_DOMAIN=${CERTBOT_DOMAIN}
- CERTBOT_OPTIONS=${CERTBOT_OPTIONS:-}
entrypoint: ['/docker-entrypoint.sh']
command: ['tail', '-f', '/dev/null']
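# Rough usage sketch for the certbot profile (assumes DNS for CERTBOT_DOMAIN already points
# at this host and port 80 is reachable; exact ordering may vary per deployment):
#   1. set CERTBOT_EMAIL and CERTBOT_DOMAIN in .env
#   2. run `docker-compose --profile certbot up -d`
#   3. set NGINX_HTTPS_ENABLED=true and NGINX_ENABLE_CERTBOT_CHALLENGE=true in .env and
#      restart nginx so it serves the issued certificate.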
# The nginx reverse proxy.
# used for reverse proxying the API service and Web service.
nginx:
image: nginx:latest
restart: always
volumes:
- ./nginx/nginx.conf.template:/etc/nginx/nginx.conf.template
- ./nginx/proxy.conf.template:/etc/nginx/proxy.conf.template
- ./nginx/https.conf.template:/etc/nginx/https.conf.template
- ./nginx/conf.d:/etc/nginx/conf.d
- ./nginx/docker-entrypoint.sh:/docker-entrypoint-mount.sh
- ./nginx/ssl:/etc/ssl # cert dir (legacy)
- ./volumes/certbot/conf/live:/etc/letsencrypt/live # cert dir (with certbot container)
- ./volumes/certbot/conf:/etc/letsencrypt
- ./volumes/certbot/www:/var/www/html
entrypoint:
[
'sh',
'-c',
"cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh",
]
environment:
NGINX_SERVER_NAME: ${NGINX_SERVER_NAME:-_}
NGINX_HTTPS_ENABLED: ${NGINX_HTTPS_ENABLED:-false}
NGINX_SSL_PORT: ${NGINX_SSL_PORT:-443}
NGINX_PORT: ${NGINX_PORT:-80}
# You must add your own SSL certificates/keys to the `./nginx/ssl` directory
# and adjust the env vars below in .env if NGINX_HTTPS_ENABLED is true.
NGINX_SSL_CERT_FILENAME: ${NGINX_SSL_CERT_FILENAME:-dify.crt}
NGINX_SSL_CERT_KEY_FILENAME: ${NGINX_SSL_CERT_KEY_FILENAME:-dify.key}
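# Example .env snippet for enabling HTTPS with self-managed certificates
# (the filenames shown are the defaults above; adjust to your own files):
#   NGINX_HTTPS_ENABLED=true
#   NGINX_SSL_CERT_FILENAME=dify.crt
#   NGINX_SSL_CERT_KEY_FILENAME=dify.key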
NGINX_SSL_PROTOCOLS: ${NGINX_SSL_PROTOCOLS:-TLSv1.1 TLSv1.2 TLSv1.3}
NGINX_WORKER_PROCESSES: ${NGINX_WORKER_PROCESSES:-auto}
NGINX_CLIENT_MAX_BODY_SIZE: ${NGINX_CLIENT_MAX_BODY_SIZE:-15M}
NGINX_KEEPALIVE_TIMEOUT: ${NGINX_KEEPALIVE_TIMEOUT:-65}
NGINX_PROXY_READ_TIMEOUT: ${NGINX_PROXY_READ_TIMEOUT:-3600s}
NGINX_PROXY_SEND_TIMEOUT: ${NGINX_PROXY_SEND_TIMEOUT:-3600s}
NGINX_ENABLE_CERTBOT_CHALLENGE: ${NGINX_ENABLE_CERTBOT_CHALLENGE:-false}
CERTBOT_DOMAIN: ${CERTBOT_DOMAIN:-}
depends_on:
- api
- web
ports:
- '${EXPOSE_NGINX_PORT:-80}:${NGINX_PORT:-80}'
- '${EXPOSE_NGINX_SSL_PORT:-443}:${NGINX_SSL_PORT:-443}'
# The Weaviate vector store.
weaviate:
image: semitechnologies/weaviate:1.19.0
profiles:
- ''
- weaviate
restart: always
volumes:
# Mount the Weaviate data directory to the container.
- ./volumes/weaviate:/var/lib/weaviate
environment:
# The Weaviate configurations
# You can refer to the [Weaviate](https://weaviate.io/developers/weaviate/config-refs/env-vars) documentation for more information.
PERSISTENCE_DATA_PATH: ${WEAVIATE_PERSISTENCE_DATA_PATH:-/var/lib/weaviate}
QUERY_DEFAULTS_LIMIT: ${WEAVIATE_QUERY_DEFAULTS_LIMIT:-25}
AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED: ${WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED:-false}
DEFAULT_VECTORIZER_MODULE: ${WEAVIATE_DEFAULT_VECTORIZER_MODULE:-none}
CLUSTER_HOSTNAME: ${WEAVIATE_CLUSTER_HOSTNAME:-node1}
AUTHENTICATION_APIKEY_ENABLED: ${WEAVIATE_AUTHENTICATION_APIKEY_ENABLED:-true}
AUTHENTICATION_APIKEY_ALLOWED_KEYS: ${WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS:-WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih}
AUTHENTICATION_APIKEY_USERS: ${WEAVIATE_AUTHENTICATION_APIKEY_USERS:-hello@dify.ai}
AUTHORIZATION_ADMINLIST_ENABLED: ${WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED:-true}
AUTHORIZATION_ADMINLIST_USERS: ${WEAVIATE_AUTHORIZATION_ADMINLIST_USERS:-hello@dify.ai}
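# Weaviate is the default vector store (VECTOR_STORE defaults to weaviate in the shared env
# above, and the '' profile entry lets this service start when no profile is selected).
# Note that WEAVIATE_API_KEY in the shared env must match one of the keys listed in
# WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS; they share the same default here.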
# Qdrant vector store.
# (if used, you need to set VECTOR_STORE to qdrant in the api & worker service.)
qdrant:
image: langgenius/qdrant:v1.7.3
profiles:
- qdrant
restart: always
volumes:
- ./volumes/qdrant:/qdrant/storage
environment:
QDRANT_API_KEY: ${QDRANT_API_KEY:-difyai123456}
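# To switch to Qdrant, a minimal .env sketch would be:
#   VECTOR_STORE=qdrant
#   QDRANT_URL=http://qdrant:6333
# then start the profile with `docker-compose --profile qdrant up -d`.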
# The Couchbase vector store.
couchbase-server:
build: ./couchbase-server
profiles:
- couchbase
restart: always
environment:
- CLUSTER_NAME=dify_search
- COUCHBASE_ADMINISTRATOR_USERNAME=${COUCHBASE_USER:-Administrator}
- COUCHBASE_ADMINISTRATOR_PASSWORD=${COUCHBASE_PASSWORD:-password}
- COUCHBASE_BUCKET=${COUCHBASE_BUCKET_NAME:-Embeddings}
- COUCHBASE_BUCKET_RAMSIZE=512
- COUCHBASE_RAM_SIZE=2048
- COUCHBASE_EVENTING_RAM_SIZE=512
- COUCHBASE_INDEX_RAM_SIZE=512
- COUCHBASE_FTS_RAM_SIZE=1024
hostname: couchbase-server
container_name: couchbase-server
working_dir: /opt/couchbase
stdin_open: true
tty: true
entrypoint: [""]
command: sh -c "/opt/couchbase/init/init-cbserver.sh"
volumes:
- ./volumes/couchbase/data:/opt/couchbase/var/lib/couchbase/data
healthcheck:
# ensure bucket was created before proceeding
test: [ "CMD-SHELL", "curl -s -f -u Administrator:password http://localhost:8091/pools/default/buckets | grep -q '\\[{' || exit 1" ]
interval: 10s
retries: 10
start_period: 30s
timeout: 10s
# The pgvector vector database.
pgvector:
image: pgvector/pgvector:pg16
profiles:
- pgvector
restart: always
environment:
PGUSER: ${PGVECTOR_PGUSER:-postgres}
# The password for the default postgres user.
POSTGRES_PASSWORD: ${PGVECTOR_POSTGRES_PASSWORD:-difyai123456}
# The name of the default postgres database.
POSTGRES_DB: ${PGVECTOR_POSTGRES_DB:-dify}
# postgres data directory
PGDATA: ${PGVECTOR_PGDATA:-/var/lib/postgresql/data/pgdata}
volumes:
- ./volumes/pgvector/data:/var/lib/postgresql/data
healthcheck:
test: ['CMD', 'pg_isready']
interval: 1s
timeout: 3s
retries: 30
# pgvecto-rs vector store
pgvecto-rs:
image: tensorchord/pgvecto-rs:pg16-v0.3.0
profiles:
- pgvecto-rs
restart: always
environment:
PGUSER: ${PGVECTOR_PGUSER:-postgres}
# The password for the default postgres user.
POSTGRES_PASSWORD: ${PGVECTOR_POSTGRES_PASSWORD:-difyai123456}
# The name of the default postgres database.
POSTGRES_DB: ${PGVECTOR_POSTGRES_DB:-dify}
# postgres data directory
PGDATA: ${PGVECTOR_PGDATA:-/var/lib/postgresql/data/pgdata}
volumes:
- ./volumes/pgvecto_rs/data:/var/lib/postgresql/data
healthcheck:
test: ['CMD', 'pg_isready']
interval: 1s
timeout: 3s
retries: 30
# Chroma vector database
chroma:
image: ghcr.io/chroma-core/chroma:0.5.1
profiles:
- chroma
restart: always
volumes:
- ./volumes/chroma:/chroma/chroma
environment:
CHROMA_SERVER_AUTHN_CREDENTIALS: ${CHROMA_SERVER_AUTHN_CREDENTIALS:-difyai123456}
CHROMA_SERVER_AUTHN_PROVIDER: ${CHROMA_SERVER_AUTHN_PROVIDER:-chromadb.auth.token_authn.TokenAuthenticationServerProvider}
IS_PERSISTENT: ${CHROMA_IS_PERSISTENT:-TRUE}
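# When using the chroma profile, the client-side CHROMA_AUTH_CREDENTIALS in the shared env
# above should match CHROMA_SERVER_AUTHN_CREDENTIALS here; the client default is empty, so
# set both explicitly in .env.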
# OceanBase vector database
oceanbase-vector:
image: quay.io/oceanbase/oceanbase-ce:4.3.3.0-100000142024101215
profiles:
- oceanbase-vector
restart: always
volumes:
- ./volumes/oceanbase/data:/root/ob
- ./volumes/oceanbase/conf:/root/.obd/cluster
environment:
OB_MEMORY_LIMIT: ${OCEANBASE_MEMORY_LIMIT:-6G}
# Oracle vector database
oracle:
image: container-registry.oracle.com/database/free:latest
profiles:
- oracle
restart: always
volumes:
- source: oradata
type: volume
target: /opt/oracle/oradata
- ./startupscripts:/opt/oracle/scripts/startup
environment:
ORACLE_PWD: ${ORACLE_PWD:-Dify123456}
ORACLE_CHARACTERSET: ${ORACLE_CHARACTERSET:-AL32UTF8}
# Milvus vector database services
etcd:
container_name: milvus-etcd
image: quay.io/coreos/etcd:v3.5.5
profiles:
- milvus
environment:
ETCD_AUTO_COMPACTION_MODE: ${ETCD_AUTO_COMPACTION_MODE:-revision}
ETCD_AUTO_COMPACTION_RETENTION: ${ETCD_AUTO_COMPACTION_RETENTION:-1000}
ETCD_QUOTA_BACKEND_BYTES: ${ETCD_QUOTA_BACKEND_BYTES:-4294967296}
ETCD_SNAPSHOT_COUNT: ${ETCD_SNAPSHOT_COUNT:-50000}
volumes:
- ./volumes/milvus/etcd:/etcd
command: etcd -advertise-client-urls=http://127.0.0.1:2379 -listen-client-urls http://0.0.0.0:2379 --data-dir /etcd
healthcheck:
test: ['CMD', 'etcdctl', 'endpoint', 'health']
interval: 30s
timeout: 20s
retries: 3
networks:
- milvus
minio:
container_name: milvus-minio
image: minio/minio:RELEASE.2023-03-20T20-16-18Z
profiles:
- milvus
environment:
MINIO_ACCESS_KEY: ${MINIO_ACCESS_KEY:-minioadmin}
MINIO_SECRET_KEY: ${MINIO_SECRET_KEY:-minioadmin}
volumes:
- ./volumes/milvus/minio:/minio_data
command: minio server /minio_data --console-address ":9001"
healthcheck:
test: ['CMD', 'curl', '-f', 'http://localhost:9000/minio/health/live']
interval: 30s
timeout: 20s
retries: 3
networks:
- milvus
milvus-standalone:
container_name: milvus-standalone
image: milvusdb/milvus:v2.3.1
profiles:
- milvus
command: ['milvus', 'run', 'standalone']
environment:
ETCD_ENDPOINTS: ${ETCD_ENDPOINTS:-etcd:2379}
MINIO_ADDRESS: ${MINIO_ADDRESS:-minio:9000}
common.security.authorizationEnabled: ${MILVUS_AUTHORIZATION_ENABLED:-true}
volumes:
- ./volumes/milvus/milvus:/var/lib/milvus
healthcheck:
test: ['CMD', 'curl', '-f', 'http://localhost:9091/healthz']
interval: 30s
start_period: 90s
timeout: 20s
retries: 3
depends_on:
- etcd
- minio
ports:
- 19530:19530
- 9091:9091
networks:
- milvus
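# To use Milvus, set VECTOR_STORE=milvus in .env and start the stack with
# `docker-compose --profile milvus up -d`. The standalone instance publishes port 19530 on
# the host; the MILVUS_URI default of http://127.0.0.1:19530 refers to the api container
# itself, so it usually needs to be pointed at an address the api container can actually
# reach (e.g. the Docker host); verify this for your environment.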
# Opensearch vector database
opensearch:
container_name: opensearch
image: opensearchproject/opensearch:latest
profiles:
- opensearch
environment:
discovery.type: ${OPENSEARCH_DISCOVERY_TYPE:-single-node}
bootstrap.memory_lock: ${OPENSEARCH_BOOTSTRAP_MEMORY_LOCK:-true}
OPENSEARCH_JAVA_OPTS: -Xms${OPENSEARCH_JAVA_OPTS_MIN:-512m} -Xmx${OPENSEARCH_JAVA_OPTS_MAX:-1024m}
OPENSEARCH_INITIAL_ADMIN_PASSWORD: ${OPENSEARCH_INITIAL_ADMIN_PASSWORD:-Qazwsxedc!@#123}
ulimits:
memlock:
soft: ${OPENSEARCH_MEMLOCK_SOFT:--1}
hard: ${OPENSEARCH_MEMLOCK_HARD:--1}
nofile:
soft: ${OPENSEARCH_NOFILE_SOFT:-65536}
hard: ${OPENSEARCH_NOFILE_HARD:-65536}
volumes:
- ./volumes/opensearch/data:/usr/share/opensearch/data
networks:
- opensearch-net
opensearch-dashboards:
container_name: opensearch-dashboards
image: opensearchproject/opensearch-dashboards:latest
profiles:
- opensearch
environment:
OPENSEARCH_HOSTS: '["https://opensearch:9200"]'
volumes:
- ./volumes/opensearch/opensearch_dashboards.yml:/usr/share/opensearch-dashboards/config/opensearch_dashboards.yml
networks:
- opensearch-net
depends_on:
- opensearch
# MyScale vector database
myscale:
container_name: myscale
image: myscale/myscaledb:1.6.4
profiles:
- myscale
restart: always
tty: true
volumes:
- ./volumes/myscale/data:/var/lib/clickhouse
- ./volumes/myscale/log:/var/log/clickhouse-server
- ./volumes/myscale/config/users.d/custom_users_config.xml:/etc/clickhouse-server/users.d/custom_users_config.xml
ports:
- ${MYSCALE_PORT:-8123}:${MYSCALE_PORT:-8123}
# https://www.elastic.co/guide/en/elasticsearch/reference/current/settings.html
# https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html#docker-prod-prerequisites
elasticsearch:
image: docker.elastic.co/elasticsearch/elasticsearch:8.14.3
container_name: elasticsearch
profiles:
- elasticsearch
restart: always
volumes:
- dify_es01_data:/usr/share/elasticsearch/data
environment:
ELASTIC_PASSWORD: ${ELASTICSEARCH_PASSWORD:-elastic}
cluster.name: dify-es-cluster
node.name: dify-es0
discovery.type: single-node
xpack.license.self_generated.type: trial
xpack.security.enabled: 'true'
xpack.security.enrollment.enabled: 'false'
xpack.security.http.ssl.enabled: 'false'
ports:
- ${ELASTICSEARCH_PORT:-9200}:9200
healthcheck:
test: ['CMD', 'curl', '-s', 'http://localhost:9200/_cluster/health?pretty']
interval: 30s
timeout: 10s
retries: 50
# https://www.elastic.co/guide/en/kibana/current/docker.html
# https://www.elastic.co/guide/en/kibana/current/settings.html
kibana:
image: docker.elastic.co/kibana/kibana:8.14.3
container_name: kibana
profiles:
- elasticsearch
depends_on:
- elasticsearch
restart: always
environment:
XPACK_ENCRYPTEDSAVEDOBJECTS_ENCRYPTIONKEY: d1a66dfd-c4d3-4a0a-8290-2abcb83ab3aa
NO_PROXY: localhost,127.0.0.1,elasticsearch,kibana
XPACK_SECURITY_ENABLED: 'true'
XPACK_SECURITY_ENROLLMENT_ENABLED: 'false'
XPACK_SECURITY_HTTP_SSL_ENABLED: 'false'
XPACK_FLEET_ISAIRGAPPED: 'true'
I18N_LOCALE: zh-CN
SERVER_PORT: '5601'
ELASTICSEARCH_HOSTS: http://elasticsearch:9200
ports:
- ${KIBANA_PORT:-5601}:5601
healthcheck:
test: ['CMD-SHELL', 'curl -s http://localhost:5601 >/dev/null || exit 1']
interval: 30s
timeout: 10s
retries: 3
# The Unstructured service.
# (if used, you need to set ETL_TYPE to Unstructured in the api & worker service.)
unstructured:
image: downloads.unstructured.io/unstructured-io/unstructured-api:latest
profiles:
- unstructured
restart: always
volumes:
- ./volumes/unstructured:/app/data
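# Typical .env sketch for using this service (the URL and port are assumptions based on the
# unstructured-api image defaults; verify for your deployment):
#   ETL_TYPE=Unstructured
#   UNSTRUCTURED_API_URL=http://unstructured:8000/general/v0/general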
networks:
# Create a network shared by sandbox, api and ssrf_proxy; it is not accessible from outside.
ssrf_proxy_network:
driver: bridge
internal: true
milvus:
driver: bridge
opensearch-net:
driver: bridge
internal: true
volumes:
oradata:
dify_es01_data: