|
import os |
|
import uuid |
|
|
|
from dotenv import load_dotenv |
|
|
|
from inference.core.utils.environment import safe_split_value, str2bool |
|
|
|
# Load environment overrides from a .env file in the current working directory
# (os.path.join instead of string concatenation for path construction).
load_dotenv(os.path.join(os.getcwd(), ".env"))
|
|
|
|
|
# Deployment target; controls which endpoints/buckets the rest of this module
# defaults to ("roboflow-platform" selects the production endpoints below).
PROJECT = os.getenv("PROJECT", "roboflow-platform")

# Whether numpy arrays are accepted as inference input.
ALLOW_NUMPY_INPUT = str2bool(os.getenv("ALLOW_NUMPY_INPUT", True))

# Comma-separated list of allowed CORS origins. Empty entries are dropped so
# that an unset/empty variable yields [] instead of [""] — the bare
# "".split(",") used previously produced a single empty-string origin.
ALLOW_ORIGINS = os.getenv("ALLOW_ORIGINS", "")
ALLOW_ORIGINS = [origin for origin in ALLOW_ORIGINS.split(",") if origin]
|
|
|
|
|
# Base URL of the Roboflow API; the default depends on the deployment target.
API_BASE_URL = os.getenv(
    "API_BASE_URL",
    (
        "https://api.roboflow.com"
        if PROJECT == "roboflow-platform"
        else "https://api.roboflow.one"
    ),
)

# API debug flag. Parsed with str2bool (as every other boolean flag in this
# module is) so that string values from the environment such as "False" are
# interpreted as booleans rather than being truthy non-empty strings.
API_DEBUG = str2bool(os.getenv("API_DEBUG", False))

# API key lookup: ROBOFLOW_API_KEY takes precedence over the legacy API_KEY.
API_KEY_ENV_NAMES = ["ROBOFLOW_API_KEY", "API_KEY"]
API_KEY = os.getenv(API_KEY_ENV_NAMES[0], None) or os.getenv(API_KEY_ENV_NAMES[1], None)
|
|
|
|
|
# AWS credentials (None when not configured).
AWS_ACCESS_KEY_ID = os.environ.get("AWS_ACCESS_KEY_ID")
AWS_SECRET_ACCESS_KEY = os.environ.get("AWS_SECRET_ACCESS_KEY")

# CogVLM quantization flags and model version.
COGVLM_LOAD_4BIT = str2bool(os.environ.get("COGVLM_LOAD_4BIT", True))
COGVLM_LOAD_8BIT = str2bool(os.environ.get("COGVLM_LOAD_8BIT", False))
COGVLM_VERSION_ID = os.environ.get("COGVLM_VERSION_ID", "cogvlm-chat-hf")
|
|
|
# CLIP model version and the model identifier derived from it.
CLIP_VERSION_ID = os.environ.get("CLIP_VERSION_ID", "ViT-B-16")
CLIP_MODEL_ID = "clip/" + CLIP_VERSION_ID
|
|
|
|
|
# Gaze model version.
GAZE_VERSION_ID = os.getenv("GAZE_VERSION_ID", "L2CS")

# Gaze model identifier. Previously this was built from CLIP_VERSION_ID
# (copy-paste from the CLIP block above), yielding ids like "gaze/ViT-B-16";
# it must be derived from GAZE_VERSION_ID.
GAZE_MODEL_ID = f"gaze/{GAZE_VERSION_ID}"

# Maximum number of inputs per gaze inference batch.
GAZE_MAX_BATCH_SIZE = int(os.getenv("GAZE_MAX_BATCH_SIZE", 8))
|
|
|
|
|
# Whether the tiny cache variant is used.
TINY_CACHE = str2bool(os.environ.get("TINY_CACHE", True))

# Maximum number of inputs per CLIP inference batch.
CLIP_MAX_BATCH_SIZE = int(os.environ.get("CLIP_MAX_BATCH_SIZE", 8))
|
|
|
|
|
# Class-agnostic NMS flag; the env-var name and default are exposed so other
# modules can reuse them.
CLASS_AGNOSTIC_NMS_ENV = "CLASS_AGNOSTIC_NMS"
DEFAULT_CLASS_AGNOSTIC_NMS = False
CLASS_AGNOSTIC_NMS = str2bool(
    os.environ.get(CLASS_AGNOSTIC_NMS_ENV, DEFAULT_CLASS_AGNOSTIC_NMS)
)

# Default confidence threshold; env-var name and default exposed for reuse.
CONFIDENCE_ENV = "CONFIDENCE"
DEFAULT_CONFIDENCE = 0.4
CONFIDENCE = float(os.environ.get(CONFIDENCE_ENV, DEFAULT_CONFIDENCE))
|
|
|
|
|
# Per-model feature toggles for the bundled "core" models; all on by default.
# CORE_MODELS_ENABLED presumably acts as the umbrella switch — confirm at the
# usage sites.
CORE_MODELS_ENABLED = str2bool(os.environ.get("CORE_MODELS_ENABLED", True))
CORE_MODEL_CLIP_ENABLED = str2bool(os.environ.get("CORE_MODEL_CLIP_ENABLED", True))
CORE_MODEL_SAM_ENABLED = str2bool(os.environ.get("CORE_MODEL_SAM_ENABLED", True))
CORE_MODEL_GAZE_ENABLED = str2bool(os.environ.get("CORE_MODEL_GAZE_ENABLED", True))
CORE_MODEL_DOCTR_ENABLED = str2bool(os.environ.get("CORE_MODEL_DOCTR_ENABLED", True))
CORE_MODEL_GROUNDINGDINO_ENABLED = str2bool(
    os.environ.get("CORE_MODEL_GROUNDINGDINO_ENABLED", True)
)
CORE_MODEL_COGVLM_ENABLED = str2bool(os.environ.get("CORE_MODEL_COGVLM_ENABLED", True))
CORE_MODEL_YOLO_WORLD_ENABLED = str2bool(
    os.environ.get("CORE_MODEL_YOLO_WORLD_ENABLED", True)
)
|
|
|
|
|
# Device identifier (None when unspecified).
DEVICE_ID = os.environ.get("DEVICE_ID")

# Kill-switches for caching and image pre-processing steps; everything stays
# enabled by default.
DISABLE_INFERENCE_CACHE = str2bool(os.environ.get("DISABLE_INFERENCE_CACHE", False))
DISABLE_PREPROC_AUTO_ORIENT = str2bool(
    os.environ.get("DISABLE_PREPROC_AUTO_ORIENT", False)
)
DISABLE_PREPROC_CONTRAST = str2bool(os.environ.get("DISABLE_PREPROC_CONTRAST", False))
DISABLE_PREPROC_GRAYSCALE = str2bool(os.environ.get("DISABLE_PREPROC_GRAYSCALE", False))
DISABLE_PREPROC_STATIC_CROP = str2bool(
    os.environ.get("DISABLE_PREPROC_STATIC_CROP", False)
)
DISABLE_VERSION_CHECK = str2bool(os.environ.get("DISABLE_VERSION_CHECK", False))
|
|
|
|
|
# ElastiCache endpoint; the default host differs between deployment targets.
if PROJECT == "roboflow-platform":
    _DEFAULT_ELASTICACHE_ENDPOINT = (
        "roboflow-infer-prod.ljzegl.cfg.use2.cache.amazonaws.com:11211"
    )
else:
    _DEFAULT_ELASTICACHE_ENDPOINT = (
        "roboflow-infer.ljzegl.cfg.use2.cache.amazonaws.com:11211"
    )
ELASTICACHE_ENDPOINT = os.environ.get(
    "ELASTICACHE_ENDPOINT", _DEFAULT_ELASTICACHE_ENDPOINT
)
|
|
|
|
|
# Stream-processing flags.
ENABLE_BYTE_TRACK = str2bool(os.environ.get("ENABLE_BYTE_TRACK", False))
ENFORCE_FPS = str2bool(os.environ.get("ENFORCE_FPS", False))

# Optional FPS cap; stays None when the variable is unset.
MAX_FPS = os.environ.get("MAX_FPS")
if MAX_FPS is not None:
    MAX_FPS = int(MAX_FPS)

FIX_BATCH_SIZE = str2bool(os.environ.get("FIX_BATCH_SIZE", False))
|
|
|
|
|
# Bind address for the server.
HOST = os.environ.get("HOST", "0.0.0.0")

# Default IoU threshold; env-var name and default exposed for reuse.
IOU_THRESHOLD_ENV = "IOU_THRESHOLD"
DEFAULT_IOU_THRESHOLD = 0.3
IOU_THRESHOLD = float(os.environ.get(IOU_THRESHOLD_ENV, DEFAULT_IOU_THRESHOLD))

# Broadcast address and port (consumers elsewhere define the exact use).
IP_BROADCAST_ADDR = os.environ.get("IP_BROADCAST_ADDR", "127.0.0.1")
IP_BROADCAST_PORT = int(os.environ.get("IP_BROADCAST_PORT", 37020))
|
|
|
|
|
# Response formatting and runtime-environment flags.
JSON_RESPONSE = str2bool(os.environ.get("JSON_RESPONSE", True))
LAMBDA = str2bool(os.environ.get("LAMBDA", False))
LEGACY_ROUTE_ENABLED = str2bool(os.environ.get("LEGACY_ROUTE_ENABLED", True))
|
|
|
|
|
# Optional license server (None when not configured).
LICENSE_SERVER = os.environ.get("LICENSE_SERVER")

# Logging verbosity.
LOG_LEVEL = os.environ.get("LOG_LEVEL", "WARNING")

# Upper bound on concurrently loaded models.
MAX_ACTIVE_MODELS = int(os.environ.get("MAX_ACTIVE_MODELS", 8))

# Maximum batch size: unlimited (infinity) unless explicitly configured.
_raw_max_batch_size = os.environ.get("MAX_BATCH_SIZE")
if _raw_max_batch_size is None:
    MAX_BATCH_SIZE = float("inf")
else:
    MAX_BATCH_SIZE = int(_raw_max_batch_size)
|
|
|
|
|
# Candidate/detection caps; env-var names and defaults exposed for reuse.
MAX_CANDIDATES_ENV = "MAX_CANDIDATES"
DEFAULT_MAX_CANDIDATES = 3000
MAX_CANDIDATES = int(os.environ.get(MAX_CANDIDATES_ENV, DEFAULT_MAX_CANDIDATES))

MAX_DETECTIONS_ENV = "MAX_DETECTIONS"
DEFAULT_MAX_DETECTIONS = 300
MAX_DETECTIONS = int(os.environ.get(MAX_DETECTIONS_ENV, DEFAULT_MAX_DETECTIONS))

# In-memory cache expiry interval (presumably seconds — confirm at usage site).
MEMORY_CACHE_EXPIRE_INTERVAL = int(os.environ.get("MEMORY_CACHE_EXPIRE_INTERVAL", 5))
|
|
|
|
|
# Metrics reporting; forcibly disabled when running under Lambda.
METRICS_ENABLED = str2bool(os.environ.get("METRICS_ENABLED", True))
if LAMBDA:
    METRICS_ENABLED = False

METRICS_INTERVAL = int(os.environ.get("METRICS_INTERVAL", 60))
METRICS_URL = os.environ.get("METRICS_URL", f"{API_BASE_URL}/inference-stats")
|
|
|
|
|
# On-disk directory used for cached model artifacts.
MODEL_CACHE_DIR = os.environ.get("MODEL_CACHE_DIR", "/tmp/cache")

# Optional default model id (None when unset).
MODEL_ID = os.environ.get("MODEL_ID")
|
|
|
|
|
# Notebook server settings.
NOTEBOOK_ENABLED = str2bool(os.environ.get("NOTEBOOK_ENABLED", False))
NOTEBOOK_PASSWORD = os.environ.get("NOTEBOOK_PASSWORD", "roboflow")
NOTEBOOK_PORT = int(os.environ.get("NOTEBOOK_PORT", 9002))

# Worker count (consumer-defined meaning).
NUM_WORKERS = int(os.environ.get("NUM_WORKERS", 1))
|
|
|
# Preferred ONNX Runtime execution providers, as a bracketed comma list.
ONNXRUNTIME_EXECUTION_PROVIDERS = os.environ.get(
    "ONNXRUNTIME_EXECUTION_PROVIDERS", "[CUDAExecutionProvider,CPUExecutionProvider]"
)

# TCP port for the server.
PORT = int(os.environ.get("PORT", 9001))
|
|
|
|
|
# Profiling flag.
PROFILE = str2bool(os.environ.get("PROFILE", False))

# Redis connection settings (host None -> not configured).
REDIS_HOST = os.environ.get("REDIS_HOST")
REDIS_PORT = int(os.environ.get("REDIS_PORT", 6379))
REDIS_SSL = str2bool(os.environ.get("REDIS_SSL", False))
REDIS_TIMEOUT = float(os.environ.get("REDIS_TIMEOUT", 2.0))
|
|
|
|
|
# Providers that must be available, split from a comma list (None when unset).
REQUIRED_ONNX_PROVIDERS = safe_split_value(os.environ.get("REQUIRED_ONNX_PROVIDERS"))

# Server identifier; a fresh UUID is generated when none is supplied.
ROBOFLOW_SERVER_UUID = os.environ.get("ROBOFLOW_SERVER_UUID", str(uuid.uuid4()))

# Shared service secret (None when unset).
ROBOFLOW_SERVICE_SECRET = os.environ.get("ROBOFLOW_SERVICE_SECRET")

# SAM embedding cache size and model version.
SAM_MAX_EMBEDDING_CACHE_SIZE = int(os.environ.get("SAM_MAX_EMBEDDING_CACHE_SIZE", 10))
SAM_VERSION_ID = os.environ.get("SAM_VERSION_ID", "vit_h")
|
|
|
|
|
|
|
# Identifier of this inference server instance (None when unset).
INFERENCE_SERVER_ID = os.environ.get("INFERENCE_SERVER_ID")

# Stream identifier: converted to int when it parses as one; otherwise the
# raw string (or None) is kept as-is.
STREAM_ID = os.environ.get("STREAM_ID")
try:
    STREAM_ID = int(STREAM_ID)
except (TypeError, ValueError):
    pass
|
|
|
|
|
# Deployment tags, split from a comma list.
TAGS = safe_split_value(os.environ.get("TAGS", ""))

# TensorRT cache location, defaulting to the model cache directory; exported
# through ORT_TENSORRT_CACHE_PATH for onnxruntime to pick up.
TENSORRT_CACHE_PATH = os.environ.get("TENSORRT_CACHE_PATH", MODEL_CACHE_DIR)
os.environ["ORT_TENSORRT_CACHE_PATH"] = TENSORRT_CACHE_PATH
|
|
|
|
|
# Version-check cadence ("once" by default).
VERSION_CHECK_MODE = os.environ.get("VERSION_CHECK_MODE", "once")

# Metlo key (None when unset).
METLO_KEY = os.environ.get("METLO_KEY")
|
|
|
|
|
# Storage bucket names; defaults depend on the deployment target.
if PROJECT == "roboflow-platform":
    CORE_MODEL_BUCKET = os.environ.get("CORE_MODEL_BUCKET", "roboflow-core-model-prod")
    INFER_BUCKET = os.environ.get("INFER_BUCKET", "roboflow-infer-prod")
else:
    CORE_MODEL_BUCKET = os.environ.get(
        "CORE_MODEL_BUCKET", "roboflow-core-model-staging"
    )
    INFER_BUCKET = os.environ.get("INFER_BUCKET", "roboflow-infer-staging")

# Active learning configuration.
ACTIVE_LEARNING_ENABLED = str2bool(os.environ.get("ACTIVE_LEARNING_ENABLED", True))
ACTIVE_LEARNING_TAGS = safe_split_value(os.environ.get("ACTIVE_LEARNING_TAGS"))

# Concurrency and cache sizing.
NUM_PARALLEL_TASKS = int(os.environ.get("NUM_PARALLEL_TASKS", 512))
STUB_CACHE_SIZE = int(os.environ.get("STUB_CACHE_SIZE", 256))
|
|
|
# Inference pipeline queue sizing and restart back-off.
PREDICTIONS_QUEUE_SIZE = int(
    os.environ.get("INFERENCE_PIPELINE_PREDICTIONS_QUEUE_SIZE", 512)
)
RESTART_ATTEMPT_DELAY = int(
    os.environ.get("INFERENCE_PIPELINE_RESTART_ATTEMPT_DELAY", 1)
)

# Video source buffering and adaptive-mode tuning knobs.
DEFAULT_BUFFER_SIZE = int(os.environ.get("VIDEO_SOURCE_BUFFER_SIZE", "64"))
DEFAULT_ADAPTIVE_MODE_STREAM_PACE_TOLERANCE = float(
    os.environ.get("VIDEO_SOURCE_ADAPTIVE_MODE_STREAM_PACE_TOLERANCE", "0.1")
)
DEFAULT_ADAPTIVE_MODE_READER_PACE_TOLERANCE = float(
    os.environ.get("VIDEO_SOURCE_ADAPTIVE_MODE_READER_PACE_TOLERANCE", "5.0")
)
DEFAULT_MINIMUM_ADAPTIVE_MODE_SAMPLES = int(
    os.environ.get("VIDEO_SOURCE_MINIMUM_ADAPTIVE_MODE_SAMPLES", "10")
)
DEFAULT_MAXIMUM_ADAPTIVE_FRAMES_DROPPED_IN_ROW = int(
    os.environ.get("VIDEO_SOURCE_MAXIMUM_ADAPTIVE_FRAMES_DROPPED_IN_ROW", "16")
)
|
|
|
# Celery worker count. Cast to int: os.getenv returns a string when the
# variable is set, while every other numeric setting in this module is an int.
NUM_CELERY_WORKERS = int(os.getenv("NUM_CELERY_WORKERS", 4))
CELERY_LOG_LEVEL = os.getenv("CELERY_LOG_LEVEL", "WARNING")

# URL of a locally running inference API.
LOCAL_INFERENCE_API_URL = os.getenv("LOCAL_INFERENCE_API_URL", "http://127.0.0.1:9001")
|
# Hosted Roboflow endpoint URLs; staging endpoints are selected whenever the
# deployment target is not "roboflow-platform".
_ON_PLATFORM = PROJECT == "roboflow-platform"
HOSTED_DETECT_URL = (
    "https://detect.roboflow.com"
    if _ON_PLATFORM
    else "https://lambda-object-detection.staging.roboflow.com"
)
HOSTED_INSTANCE_SEGMENTATION_URL = (
    "https://outline.roboflow.com"
    if _ON_PLATFORM
    else "https://lambda-instance-segmentation.staging.roboflow.com"
)
HOSTED_CLASSIFICATION_URL = (
    "https://classify.roboflow.com"
    if _ON_PLATFORM
    else "https://lambda-classification.staging.roboflow.com"
)
HOSTED_CORE_MODEL_URL = (
    "https://infer.roboflow.com"
    if _ON_PLATFORM
    else "https://3hkaykeh3j.execute-api.us-east-1.amazonaws.com"
)
|
|
|
# Workflows execution configuration.
DISABLE_WORKFLOW_ENDPOINTS = str2bool(
    os.environ.get("DISABLE_WORKFLOW_ENDPOINTS", False)
)
WORKFLOWS_STEP_EXECUTION_MODE = os.environ.get("WORKFLOWS_STEP_EXECUTION_MODE", "remote")
WORKFLOWS_REMOTE_API_TARGET = os.environ.get("WORKFLOWS_REMOTE_API_TARGET", "hosted")
WORKFLOWS_MAX_CONCURRENT_STEPS = int(
    os.environ.get("WORKFLOWS_MAX_CONCURRENT_STEPS", "8")
)
WORKFLOWS_REMOTE_EXECUTION_MAX_STEP_BATCH_SIZE = int(
    os.environ.get("WORKFLOWS_REMOTE_EXECUTION_MAX_STEP_BATCH_SIZE", "1")
)
WORKFLOWS_REMOTE_EXECUTION_MAX_STEP_CONCURRENT_REQUESTS = int(
    os.environ.get("WORKFLOWS_REMOTE_EXECUTION_MAX_STEP_CONCURRENT_REQUESTS", "8")
)
|
|