kabancov_et commited on
Commit
cafc0bb
·
1 Parent(s): dc7c66f

Prepare for HF deployment: clean structure, HF optimizations, remove demo files

Browse files
Files changed (13) hide show
  1. .gitignore +35 -36
  2. Dockerfile +33 -4
  3. README.md +76 -1
  4. __init__.py +30 -0
  5. app.py +0 -168
  6. config.py +243 -0
  7. env.example +59 -0
  8. model_manager.py +75 -0
  9. rate_limiter.py +78 -0
  10. request_queue.py +71 -0
  11. requirements.txt +21 -5
  12. start.py +82 -0
  13. user_manager.py +25 -0
.gitignore CHANGED
@@ -1,10 +1,4 @@
1
- # Virtual Environment
2
- .venv/
3
- venv/
4
- env/
5
- ENV/
6
-
7
- # Python cache
8
  __pycache__/
9
  *.py[cod]
10
  *$py.class
@@ -27,8 +21,14 @@ wheels/
27
  *.egg
28
  MANIFEST
29
 
30
- # Old files directory
31
- old_files/
 
 
 
 
 
 
32
 
33
  # IDE
34
  .vscode/
@@ -50,36 +50,35 @@ Thumbs.db
50
  *.log
51
  logs/
52
 
53
- # Environment variables
54
- .env
55
- .env.local
56
- .env.development.local
57
- .env.test.local
58
- .env.production.local
59
 
60
- # Temporary files
61
- *.tmp
62
- *.temp
63
  temp/
64
  tmp/
 
65
 
66
- # Model files (if any)
67
- *.pth
68
- *.pt
69
- *.onnx
70
- *.bin
 
 
 
 
 
 
 
 
71
 
72
- # Jupyter Notebook
73
- .ipynb_checkpoints
74
 
75
- # Coverage reports
76
- htmlcov/
77
- .tox/
78
- .coverage
79
- .coverage.*
80
- .cache
81
- nosetests.xml
82
- coverage.xml
83
- *.cover
84
- .hypothesis/
85
- .pytest_cache/
 
1
+ # Python
 
 
 
 
 
 
2
  __pycache__/
3
  *.py[cod]
4
  *$py.class
 
21
  *.egg
22
  MANIFEST
23
 
24
+ # Virtual environments
25
+ .env
26
+ .venv
27
+ env/
28
+ venv/
29
+ ENV/
30
+ env.bak/
31
+ venv.bak/
32
 
33
  # IDE
34
  .vscode/
 
50
  *.log
51
  logs/
52
 
53
+ # Cache
54
+ cache/
55
+ *.cache
56
+ .hf_cache/
 
 
57
 
58
+ # Results and temporary files
59
+ results/
 
60
  temp/
61
  tmp/
62
+ *.tmp
63
 
64
+ # Test files
65
+ test_*.py
66
+ *_test.py
67
+ test_image.jpg
68
+
69
+ # Documentation (keep only main README)
70
+ README_RU.md
71
+ demo.html
72
+
73
+ # Environment files (will be set in HF)
74
+ .env
75
+ .env.local
76
+ .env.production
77
 
78
+ # Docker
79
+ .dockerignore
80
 
81
+ # HF specific
82
+ .hf_cache/
83
+ transformers_cache/
84
+ datasets_cache/
 
 
 
 
 
 
 
Dockerfile CHANGED
@@ -1,15 +1,16 @@
1
  # Read the doc: https://huggingface.co/docs/hub/spaces-sdks-docker
2
  # you will also find guides on how best to write your Dockerfile
3
 
4
- FROM python:3.9
5
 
6
- # Install only server-side dependencies for OpenCV (no GUI)
7
  RUN apt-get update && apt-get install -y --no-install-recommends \
8
  libjpeg62-turbo-dev \
9
  libpng16-16 \
10
  libtiff6 \
11
  libopenblas-dev \
12
  gfortran \
 
13
  && rm -rf /var/lib/apt/lists/*
14
 
15
  # The two following lines are requirements for the Dev Mode to be functional
@@ -17,8 +18,36 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
17
  RUN useradd -m -u 1000 user
18
  WORKDIR /app
19
 
20
- COPY --chown=user ./requirements.txt requirements.txt
21
- RUN pip install --no-cache-dir --upgrade -r requirements.txt
 
 
22
 
 
23
  COPY --chown=user . /app
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
24
  CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
 
 
 
 
 
 
 
 
 
 
 
1
  # Read the doc: https://huggingface.co/docs/hub/spaces-sdks-docker
2
  # you will also find guides on how best to write your Dockerfile
3
 
4
+ FROM python:3.9-slim
5
 
6
+ # Install system dependencies
7
  RUN apt-get update && apt-get install -y --no-install-recommends \
8
  libjpeg62-turbo-dev \
9
  libpng16-16 \
10
  libtiff6 \
11
  libopenblas-dev \
12
  gfortran \
13
+ curl \
14
  && rm -rf /var/lib/apt/lists/*
15
 
16
  # The two following lines are requirements for the Dev Mode to be functional
 
18
  RUN useradd -m -u 1000 user
19
  WORKDIR /app
20
 
21
+ # Copy requirements first for better caching
22
+ COPY --chown=user requirements.txt requirements.txt
23
+ RUN pip install --no-cache-dir --upgrade pip && \
24
+ pip install --no-cache-dir -r requirements.txt
25
 
26
+ # Copy application code
27
  COPY --chown=user . /app
28
+
29
+ # Create necessary directories
30
+ RUN mkdir -p /app/logs /app/cache && chown -R user:user /app
31
+
32
+ # Switch to user
33
+ USER user
34
+
35
+ # Health check
36
+ HEALTHCHECK --interval=30s --timeout=10s --start-period=60s --retries=3 \
37
+ CMD curl -f http://localhost:7860/health || exit 1
38
+
39
+ # Expose port
40
+ EXPOSE 7860
41
+
42
+ # Default command (can be overridden)
43
  CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
44
+
45
+ # Alternative commands for different deployment scenarios:
46
+ # For production with multiple workers:
47
+ # CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860", "--workers", "4"]
48
+ #
49
+ # For development with auto-reload:
50
+ # CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860", "--reload"]
51
+ #
52
+ # For Hugging Face Spaces (single worker recommended):
53
+ # CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
README.md CHANGED
@@ -9,4 +9,79 @@ license: mit
9
  short_description: Clothing segmentation and background removal
10
  ---
11
 
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9
  short_description: Clothing segmentation and background removal
10
  ---
11
 
12
+ # Loomi Clothing Detection API 🚀
13
+
14
+ AI-powered clothing analysis and segmentation API, optimized for Hugging Face Spaces.
15
+
16
+ ## ✨ Features
17
+
18
+ - **🧠 AI-Powered**: Uses Segformer model for clothing detection
19
+ - **🖼️ Image Processing**: Background removal and dominant color detection
20
+ - **⚡ Async**: Non-blocking model loading and request processing
21
+ - **🚦 Rate Limiting**: Per-user request limits and concurrent control
22
+ - **👥 Multi-User**: Supports multiple users with isolation
23
+ - **🔧 HF Optimized**: Built specifically for Hugging Face Spaces
24
+
25
+ ## 🚀 Quick Start
26
+
27
+ ### API Endpoints
28
+
29
+ - `GET /` - API overview
30
+ - `GET /health` - System health and status
31
+ - `GET /user/stats` - User usage statistics
32
+ - `POST /clothing` - Detect clothing types and coordinates
33
+ - `POST /analyze` - Full analysis with color detection
34
+ - `POST /analyze/download` - Download processed images
35
+
36
+ ### Usage Example
37
+
38
+ ```python
39
+ import requests
40
+
41
+ # Upload image for clothing detection
42
+ with open('image.jpg', 'rb') as f:
43
+ response = requests.post(
44
+ 'https://your-hf-space.hf.space/clothing',
45
+ files={'file': f}
46
+ )
47
+ result = response.json()
48
+ print(result)
49
+ ```
50
+
51
+ ## 🏗️ Architecture
52
+
53
+ - **FastAPI**: Modern, fast web framework
54
+ - **Async Processing**: Non-blocking operations
55
+ - **Rate Limiting**: User-based request control
56
+ - **Model Management**: Efficient ML model loading
57
+ - **Queue System**: Background task processing
58
+
59
+ ## 🔧 Configuration
60
+
61
+ The API automatically detects Hugging Face Spaces and applies optimizations:
62
+
63
+ - Single worker process
64
+ - Conservative rate limits (15 req/min, 5 concurrent)
65
+ - Optimized cache sizes
66
+ - HF-specific environment variables
67
+
68
+ ## 📱 Integration
69
+
70
+ Perfect for:
71
+ - Mobile apps (React Native, Flutter)
72
+ - Web applications
73
+ - E-commerce platforms
74
+ - Fashion analysis tools
75
+
76
+ ## 🤝 Contributing
77
+
78
+ 1. Fork the repository
79
+ 2. Create a feature branch
80
+ 3. Make your changes
81
+ 4. Submit a pull request
82
+
83
+ ---
84
+
85
+ **Made with ❤️ by the Loomi Team**
86
+
87
+ *AI-powered clothing analysis, production ready! 🎯*
__init__.py ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Loomi Clothing Detection API
3
+ ============================
4
+
5
+ A clean, modular FastAPI application for AI-powered clothing analysis and segmentation.
6
+
7
+ Features:
8
+ - Async model loading
9
+ - Rate limiting and user management
10
+ - Queue-based processing
11
+ - Token system foundation
12
+ - Clean, maintainable code structure
13
+
14
+ Modules:
15
+ - main.py: Main FastAPI application
16
+ - config.py: Configuration management
17
+ - rate_limiter.py: Rate limiting and user tracking
18
+ - model_manager.py: Async ML model management
19
+ - request_queue.py: Background task processing
20
+ - user_manager.py: User identification and management
21
+ - clothing_detector.py: Core ML inference
22
+ - process.py: Image processing utilities
23
+
24
+ Author: Loomi Team
25
+ Version: 2.0.0
26
+ """
27
+
28
+ __version__ = "2.0.0"
29
+ __author__ = "Loomi Team"
30
+ __description__ = "AI-powered clothing analysis and segmentation API"
app.py DELETED
@@ -1,168 +0,0 @@
1
- from fastapi import FastAPI, UploadFile, File, Form, HTTPException, Request
2
- from fastapi.responses import JSONResponse
3
- from fastapi.middleware.cors import CORSMiddleware
4
- from pydantic import BaseModel
5
- from typing import Optional, List
6
- from process import get_dominant_color_from_base64
7
- from clothing_detector import (
8
- detect_clothing_types,
9
- create_clothing_only_image,
10
- get_clothing_detector,
11
- )
12
- import logging
13
- import os
14
- import base64
15
- from starlette import status
16
-
17
- # Logging setup
18
- logging.basicConfig(level=logging.INFO)
19
- logger = logging.getLogger(__name__)
20
-
21
- app = FastAPI(title="FashionAI API", description="Clothing analysis & segmentation API")
22
-
23
- # CORS (configure with env ALLOWED_ORIGINS="http://localhost:5173,https://your-site")
24
- allowed_origins_env = os.getenv("ALLOWED_ORIGINS", "*")
25
- allow_origins: List[str]
26
- if allowed_origins_env.strip() == "*":
27
- allow_origins = ["*"]
28
- else:
29
- allow_origins = [o.strip() for o in allowed_origins_env.split(",") if o.strip()]
30
-
31
- app.add_middleware(
32
- CORSMiddleware,
33
- allow_origins=allow_origins,
34
- allow_credentials=True,
35
- allow_methods=["*"],
36
- allow_headers=["*"],
37
- )
38
-
39
- # API settings
40
- MAX_UPLOAD_MB = int(os.getenv("MAX_UPLOAD_MB", "10"))
41
- MAX_UPLOAD_BYTES = MAX_UPLOAD_MB * 1024 * 1024
42
- ALLOWED_CONTENT_TYPES = {
43
- c.strip() for c in os.getenv("ALLOWED_CONTENT_TYPES", "image/jpeg,image/png,image/webp").split(",") if c.strip()
44
- }
45
-
46
-
47
- @app.exception_handler(Exception)
48
- async def unhandled_exception_handler(request: Request, exc: Exception):
49
- logging.exception("Unhandled server error: %s", exc)
50
- return JSONResponse(
51
- status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
52
- content={"error": "Internal Server Error"},
53
- )
54
-
55
- @app.on_event("startup")
56
- async def maybe_warmup_model():
57
- if os.getenv("WARMUP_ON_STARTUP", "true").lower() in {"1", "true", "yes"}:
58
- # Warm up model on startup to reduce first request latency
59
- get_clothing_detector()
60
-
61
-
62
- @app.get("/")
63
- async def api_root():
64
- return JSONResponse({
65
- "name": "FashionAI API",
66
- "status": "ok",
67
- "docs": "/docs",
68
- "endpoints": ["/clothing", "/analyze", "/analyze/base64", "/labels", "/healthz"],
69
- })
70
-
71
-
72
- @app.get("/healthz")
73
- async def health_check():
74
- return {"status": "ok"}
75
-
76
- @app.post("/clothing")
77
- async def get_clothing_list(file: UploadFile = File(...)):
78
- """Detect all clothing types on image and return coordinates."""
79
- logger.info(f"Processing clothing detection for file: {file.filename}")
80
- # Validation
81
- if file.content_type not in ALLOWED_CONTENT_TYPES:
82
- raise HTTPException(status_code=415, detail=f"Unsupported content-type: {file.content_type}")
83
- # Read with size guard
84
- image_bytes = await file.read()
85
- if len(image_bytes) > MAX_UPLOAD_BYTES:
86
- raise HTTPException(status_code=413, detail=f"File too large. Max {MAX_UPLOAD_MB}MB")
87
- clothing_result = detect_clothing_types(image_bytes)
88
- logger.info(f"Clothing detection completed. Found {clothing_result.get('total_detected', 0)} items")
89
- return clothing_result
90
-
91
- @app.post("/analyze")
92
- async def analyze_image(
93
- file: UploadFile = File(...),
94
- selected_clothing: Optional[str] = Form(None)
95
- ):
96
- """
97
- Full image analysis: clothing detection, clothing-only image, dominant color.
98
-
99
- - selected_clothing: Optional clothing type to focus on
100
- - color: Dominant color of clothing
101
- - clothing_analysis: Detected clothing types with stats
102
- - clothing_only_image: Base64 PNG with transparent background
103
- """
104
- logger.info(f"Processing full analysis for file: {file.filename}, selected_clothing: {selected_clothing}")
105
- if file.content_type not in ALLOWED_CONTENT_TYPES:
106
- raise HTTPException(status_code=415, detail=f"Unsupported content-type: {file.content_type}")
107
- image_bytes = await file.read()
108
- if len(image_bytes) > MAX_UPLOAD_BYTES:
109
- raise HTTPException(status_code=413, detail=f"File too large. Max {MAX_UPLOAD_MB}MB")
110
-
111
- # Step 1: Detect clothing types (cached segmentation)
112
- logger.info("Detecting clothing types...")
113
- clothing_result = detect_clothing_types(image_bytes)
114
-
115
- # Step 2: Create clothing-only image (cached segmentation)
116
- logger.info("Creating clothing-only image...")
117
- clothing_only_image = create_clothing_only_image(image_bytes, selected_clothing)
118
-
119
- # Step 3: Get dominant color from clothing-only image (no background)
120
- logger.info("Getting dominant color from clothing-only image...")
121
- color = get_dominant_color_from_base64(clothing_only_image)
122
-
123
- logger.info("Full analysis completed successfully")
124
- return JSONResponse(content={
125
- "dominant_color": color,
126
- "clothing_analysis": clothing_result,
127
- "clothing_only_image": clothing_only_image,
128
- "selected_clothing": selected_clothing
129
- })
130
-
131
-
132
- class Base64AnalyzeRequest(BaseModel):
133
- image_base64: str
134
- selected_clothing: Optional[str] = None
135
-
136
-
137
- @app.post("/analyze/base64")
138
- async def analyze_image_base64(payload: Base64AnalyzeRequest):
139
- """Analyze base64-encoded image (handy for React Native)."""
140
- # Decode image from base64
141
- if payload.image_base64.startswith("data:image"):
142
- base64_data = payload.image_base64.split(",", 1)[1]
143
- else:
144
- base64_data = payload.image_base64
145
-
146
- image_bytes = base64.b64decode(base64_data)
147
-
148
- # 1) Clothing detection
149
- clothing_result = detect_clothing_types(image_bytes)
150
-
151
- # 2) Clothing-only image
152
- clothing_only_image = create_clothing_only_image(image_bytes, payload.selected_clothing)
153
-
154
- # 3) Dominant color from clothing-only image
155
- color = get_dominant_color_from_base64(clothing_only_image)
156
-
157
- return JSONResponse(content={
158
- "dominant_color": color,
159
- "clothing_analysis": clothing_result,
160
- "clothing_only_image": clothing_only_image,
161
- "selected_clothing": payload.selected_clothing,
162
- })
163
-
164
-
165
- @app.get("/labels")
166
- async def get_labels():
167
- detector = get_clothing_detector()
168
- return {"labels": list(detector.labels.values())}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
config.py ADDED
@@ -0,0 +1,243 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from typing import List, Dict, Any
3
+ from dataclasses import dataclass, field
4
+
5
+ @dataclass
6
+ class APIConfig:
7
+ """Configuration class for the Loomi Clothing Detection API."""
8
+
9
+ # API Settings
10
+ title: str = "Loomi Clothing Detection API"
11
+ description: str = "AI-powered clothing analysis and segmentation API with rate limiting and user management"
12
+ version: str = "2.0.0"
13
+
14
+ # Server Settings
15
+ host: str = "0.0.0.0"
16
+ port: int = 7860
17
+ workers: int = 1
18
+ reload: bool = False
19
+
20
+ # File Upload Settings
21
+ max_upload_mb: int = 10
22
+ max_upload_bytes: int = field(init=False)
23
+ allowed_content_types: set = field(default_factory=lambda: {"image/jpeg", "image/png", "image/webp"})
24
+
25
+ # Rate Limiting
26
+ rate_limit_requests: int = 10 # requests per minute
27
+ rate_limit_window: int = 60 # seconds
28
+ max_concurrent_requests: int = 5 # per user
29
+
30
+ # Model Settings
31
+ model_warmup_on_startup: bool = True
32
+ model_cache_size: int = 10
33
+
34
+ # Queue Settings
35
+ num_workers: int = 2
36
+ queue_max_size: int = 100
37
+
38
+ # CORS Settings
39
+ allowed_origins: List[str] = field(default_factory=lambda: ["*"])
40
+
41
+ # Security Settings
42
+ enable_auth: bool = False
43
+ jwt_secret: str = "your-secret-key-change-in-production"
44
+ jwt_algorithm: str = "HS256"
45
+ jwt_expire_minutes: int = 60
46
+
47
+ # Token System Settings (Future)
48
+ enable_token_system: bool = False
49
+ free_tier_requests_per_day: int = 100
50
+ premium_tier_requests_per_day: int = 1000
51
+
52
+ # Logging Settings
53
+ log_level: str = "INFO"
54
+ log_format: str = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
55
+
56
+ # Monitoring Settings
57
+ enable_metrics: bool = False
58
+ metrics_port: int = 8000
59
+
60
+ # Cache Settings
61
+ enable_redis: bool = False
62
+ redis_host: str = "localhost"
63
+ redis_port: int = 6379
64
+ redis_db: int = 0
65
+ redis_password: str = ""
66
+
67
+ # Hugging Face Spaces Settings
68
+ is_huggingface_space: bool = False
69
+ space_id: str = ""
70
+ hf_space: bool = False
71
+ hf_cache_dir: str = "/tmp/hf_cache"
72
+
73
+ def __post_init__(self):
74
+ """Post-initialization to set computed fields and load from environment."""
75
+ # Load from environment variables
76
+ self.host = os.getenv("HOST", self.host)
77
+ self.port = int(os.getenv("PORT", str(self.port)))
78
+ self.workers = int(os.getenv("WORKERS", str(self.workers)))
79
+ self.reload = os.getenv("RELOAD", str(self.reload)).lower() == "true"
80
+
81
+ self.max_upload_mb = int(os.getenv("MAX_UPLOAD_MB", str(self.max_upload_mb)))
82
+ self.max_upload_bytes = self.max_upload_mb * 1024 * 1024
83
+
84
+ # Handle allowed content types
85
+ content_types_env = os.getenv("ALLOWED_CONTENT_TYPES")
86
+ if content_types_env:
87
+ self.allowed_content_types = {c.strip() for c in content_types_env.split(",") if c.strip()}
88
+
89
+ self.rate_limit_requests = int(os.getenv("RATE_LIMIT_REQUESTS", str(self.rate_limit_requests)))
90
+ self.rate_limit_window = int(os.getenv("RATE_LIMIT_WINDOW", str(self.rate_limit_window)))
91
+ self.max_concurrent_requests = int(os.getenv("MAX_CONCURRENT_REQUESTS", str(self.max_concurrent_requests)))
92
+
93
+ self.model_warmup_on_startup = os.getenv("MODEL_WARMUP_ON_STARTUP", str(self.model_warmup_on_startup)).lower() == "true"
94
+ self.model_cache_size = int(os.getenv("MODEL_CACHE_SIZE", str(self.model_cache_size)))
95
+
96
+ self.num_workers = int(os.getenv("NUM_WORKERS", str(self.num_workers)))
97
+ self.queue_max_size = int(os.getenv("QUEUE_MAX_SIZE", str(self.queue_max_size)))
98
+
99
+ # Handle allowed origins
100
+ origins_env = os.getenv("ALLOWED_ORIGINS")
101
+ if origins_env and origins_env != "*":
102
+ self.allowed_origins = [o.strip() for o in origins_env.split(",") if o.strip()]
103
+
104
+ self.enable_auth = os.getenv("ENABLE_AUTH", str(self.enable_auth)).lower() == "true"
105
+ self.jwt_secret = os.getenv("JWT_SECRET", self.jwt_secret)
106
+ self.jwt_algorithm = os.getenv("JWT_ALGORITHM", self.jwt_algorithm)
107
+ self.jwt_expire_minutes = int(os.getenv("JWT_EXPIRE_MINUTES", str(self.jwt_expire_minutes)))
108
+
109
+ self.enable_token_system = os.getenv("ENABLE_TOKEN_SYSTEM", str(self.enable_token_system)).lower() == "true"
110
+ self.free_tier_requests_per_day = int(os.getenv("FREE_TIER_REQUESTS_PER_DAY", str(self.free_tier_requests_per_day)))
111
+ self.premium_tier_requests_per_day = int(os.getenv("PREMIUM_TIER_REQUESTS_PER_DAY", str(self.premium_tier_requests_per_day)))
112
+
113
+ self.log_level = os.getenv("LOG_LEVEL", self.log_level)
114
+ self.log_format = os.getenv("LOG_FORMAT", self.log_format)
115
+
116
+ self.enable_metrics = os.getenv("ENABLE_METRICS", str(self.enable_metrics)).lower() == "true"
117
+ self.metrics_port = int(os.getenv("METRICS_PORT", str(self.metrics_port)))
118
+
119
+ self.enable_redis = os.getenv("ENABLE_REDIS", str(self.enable_redis)).lower() == "true"
120
+ self.redis_host = os.getenv("REDIS_HOST", self.redis_host)
121
+ self.redis_port = int(os.getenv("REDIS_PORT", str(self.redis_port)))
122
+ self.redis_db = int(os.getenv("REDIS_DB", str(self.redis_db)))
123
+ self.redis_password = os.getenv("REDIS_PASSWORD", self.redis_password)
124
+
125
+ # Hugging Face detection and settings
126
+ self.space_id = os.getenv("SPACE_ID", "")
127
+ self.hf_space = os.getenv("HF_SPACE", str(self.hf_space)).lower() == "true"
128
+ self.hf_cache_dir = os.getenv("HF_CACHE_DIR", self.hf_cache_dir)
129
+
130
+ # Determine if this is a Hugging Face Space
131
+ self.is_huggingface_space = bool(self.space_id.strip()) or self.hf_space
132
+
133
+ # Apply HF-specific optimizations
134
+ if self.is_huggingface_space:
135
+ self._apply_hf_optimizations()
136
+
137
+ def _apply_hf_optimizations(self):
138
+ """Apply Hugging Face Spaces specific optimizations."""
139
+ # Set HF environment variables
140
+ os.environ["HF_HOME"] = self.hf_cache_dir
141
+ os.environ["TRANSFORMERS_CACHE"] = f"{self.hf_cache_dir}/transformers"
142
+ os.environ["HF_DATASETS_CACHE"] = f"{self.hf_cache_dir}/datasets"
143
+
144
+ # Optimize for HF Spaces
145
+ if self.workers > 1:
146
+ self.workers = 1 # HF Spaces work better with single worker
147
+
148
+ # Conservative rate limiting for HF
149
+ if self.rate_limit_requests > 15:
150
+ self.rate_limit_requests = 15
151
+
152
+ if self.max_concurrent_requests > 5:
153
+ self.max_concurrent_requests = 5
154
+
155
+ # Smaller cache sizes for HF
156
+ if self.model_cache_size > 5:
157
+ self.model_cache_size = 5
158
+
159
+ if self.queue_max_size > 25:
160
+ self.queue_max_size = 25
161
+
162
+ def get_rate_limit_info(self) -> Dict[str, Any]:
163
+ """Get rate limit information for API responses."""
164
+ return {
165
+ "requests_per_minute": self.rate_limit_requests,
166
+ "window_seconds": self.rate_limit_window,
167
+ "concurrent_limit": self.max_concurrent_requests,
168
+ "file_size_limit_mb": self.max_upload_mb
169
+ }
170
+
171
+ def get_model_info(self) -> Dict[str, Any]:
172
+ """Get model information for API responses."""
173
+ return {
174
+ "warmup_on_startup": self.model_warmup_on_startup,
175
+ "cache_size": self.model_cache_size,
176
+ "workers": self.num_workers
177
+ }
178
+
179
+ def get_security_info(self) -> Dict[str, Any]:
180
+ """Get security information for API responses."""
181
+ return {
182
+ "authentication_enabled": self.enable_auth,
183
+ "cors_enabled": True,
184
+ "rate_limiting_enabled": True,
185
+ "file_validation_enabled": True
186
+ }
187
+
188
+ def get_hf_info(self) -> Dict[str, Any]:
189
+ """Get Hugging Face specific information."""
190
+ return {
191
+ "is_hf_space": self.is_huggingface_space,
192
+ "space_id": self.space_id,
193
+ "cache_dir": self.hf_cache_dir,
194
+ "optimizations_applied": self.is_huggingface_space
195
+ }
196
+
197
+ def validate(self) -> List[str]:
198
+ """Validate configuration and return list of warnings/errors."""
199
+ warnings = []
200
+
201
+ if self.rate_limit_requests < 1:
202
+ warnings.append("RATE_LIMIT_REQUESTS should be at least 1")
203
+
204
+ if self.max_concurrent_requests < 1:
205
+ warnings.append("MAX_CONCURRENT_REQUESTS should be at least 1")
206
+
207
+ if self.max_upload_mb < 1:
208
+ warnings.append("MAX_UPLOAD_MB should be at least 1")
209
+
210
+ if self.workers < 1:
211
+ warnings.append("WORKERS should be at least 1")
212
+
213
+ if self.is_huggingface_space and self.workers > 1:
214
+ warnings.append("Multiple workers not recommended in Hugging Face Spaces")
215
+
216
+ if self.is_huggingface_space and self.rate_limit_requests > 20:
217
+ warnings.append("High rate limits may cause issues in Hugging Face Spaces")
218
+
219
+ return warnings
220
+
221
+ # Global configuration instance
222
+ config = APIConfig()
223
+
224
+ # Validate configuration on import
225
+ if __name__ == "__main__":
226
+ warnings = config.validate()
227
+ if warnings:
228
+ print("Configuration warnings:")
229
+ for warning in warnings:
230
+ print(f" - {warning}")
231
+ else:
232
+ print("Configuration is valid!")
233
+
234
+ print(f"\nCurrent configuration:")
235
+ print(f" - Rate limit: {config.rate_limit_requests} requests per {config.rate_limit_window} seconds")
236
+ print(f" - Concurrent limit: {config.max_concurrent_requests} requests")
237
+ print(f" - File size limit: {config.max_upload_mb}MB")
238
+ print(f" - Workers: {config.workers}")
239
+ print(f" - Background workers: {config.num_workers}")
240
+ print(f" - Hugging Face Space: {config.is_huggingface_space}")
241
+ if config.is_huggingface_space:
242
+ print(f" - Space ID: {config.space_id}")
243
+ print(f" - Cache dir: {config.hf_cache_dir}")
env.example ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Loomi Clothing Detection API Configuration - HF Optimized
2
+ # Copy this to .env and modify as needed
3
+
4
+ # API Settings
5
+ HOST=0.0.0.0
6
+ PORT=7860
7
+ WORKERS=1
8
+ RELOAD=false
9
+
10
+ # File Upload Settings
11
+ MAX_UPLOAD_MB=15
12
+ ALLOWED_CONTENT_TYPES=image/jpeg,image/png,image/webp
13
+
14
+ # Rate Limiting (HF-optimized)
15
+ RATE_LIMIT_REQUESTS=15
16
+ RATE_LIMIT_WINDOW=60
17
+ MAX_CONCURRENT_REQUESTS=5
18
+
19
+ # Model Settings (HF-optimized)
20
+ MODEL_WARMUP_ON_STARTUP=true
21
+ MODEL_CACHE_SIZE=5
22
+
23
+ # Queue Settings (HF-optimized)
24
+ NUM_WORKERS=1
25
+ QUEUE_MAX_SIZE=25
26
+
27
+ # CORS Settings
28
+ ALLOWED_ORIGINS=*
29
+
30
+ # Security Settings
31
+ ENABLE_AUTH=false
32
+ JWT_SECRET=your-secret-key-change-in-production
33
+ JWT_ALGORITHM=HS256
34
+ JWT_EXPIRE_MINUTES=60
35
+
36
+ # Token System Settings (Future)
37
+ ENABLE_TOKEN_SYSTEM=false
38
+ FREE_TIER_REQUESTS_PER_DAY=100
39
+ PREMIUM_TIER_REQUESTS_PER_DAY=1000
40
+
41
+ # Logging Settings
42
+ LOG_LEVEL=INFO
43
+ LOG_FORMAT=%(asctime)s - %(name)s - %(levelname)s - %(message)s
44
+
45
+ # Monitoring Settings
46
+ ENABLE_METRICS=false
47
+ METRICS_PORT=8000
48
+
49
+ # Cache Settings
50
+ ENABLE_REDIS=false
51
+ REDIS_HOST=localhost
52
+ REDIS_PORT=6379
53
+ REDIS_DB=0
54
+ REDIS_PASSWORD=
55
+
56
+ # Hugging Face Spaces Settings
57
+ SPACE_ID=your-username/your-space-name
58
+ HF_SPACE=true
59
+ HF_CACHE_DIR=/tmp/hf_cache
model_manager.py ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Async model management for the Loomi Clothing Detection API.
3
+ """
4
+ import asyncio
5
+ import logging
6
+ from config import config
7
+
8
+ logger = logging.getLogger(__name__)
9
+
10
+ class AsyncModelManager:
11
+ """Manages asynchronous loading of the ML model."""
12
+
13
+ def __init__(self):
14
+ self.model_loaded = False
15
+ self.model_loading = False
16
+ self.loading_task = None
17
+ self._lock = asyncio.Lock()
18
+
19
+ async def ensure_model_loaded(self):
20
+ """Ensure model is loaded, load it asynchronously if needed."""
21
+ if self.model_loaded:
22
+ return
23
+
24
+ async with self._lock:
25
+ if self.model_loaded:
26
+ return
27
+
28
+ if self.model_loading:
29
+ # Wait for existing loading task
30
+ if self.loading_task:
31
+ await self.loading_task
32
+ return
33
+
34
+ # Start loading task
35
+ self.model_loading = True
36
+ self.loading_task = asyncio.create_task(self._load_model())
37
+ await self.loading_task
38
+
39
+ async def _load_model(self):
40
+ """Load model in background thread."""
41
+ try:
42
+ logger.info("Starting model loading in background...")
43
+
44
+ # Run model loading in thread pool to avoid blocking
45
+ loop = asyncio.get_event_loop()
46
+ await loop.run_in_executor(None, self._load_model_sync)
47
+
48
+ self.model_loaded = True
49
+ logger.info("Model loaded successfully!")
50
+
51
+ except Exception as e:
52
+ logger.error(f"Failed to load model: {e}")
53
+ self.model_loading = False
54
+ finally:
55
+ self.model_loading = False
56
+
57
+ def _load_model_sync(self):
58
+ """Synchronous model loading (runs in thread pool)."""
59
+ try:
60
+ from clothing_detector import get_clothing_detector
61
+ detector = get_clothing_detector()
62
+ logger.info("Model loaded in background thread")
63
+ except Exception as e:
64
+ logger.error(f"Error loading model: {e}")
65
+ raise
66
+
67
+ def get_status(self) -> dict:
68
+ """Get current model status for health checks."""
69
+ return {
70
+ "model_loaded": self.model_loaded,
71
+ "model_loading": self.model_loading
72
+ }
73
+
74
+ # Global model manager instance
75
+ model_manager = AsyncModelManager()
rate_limiter.py ADDED
@@ -0,0 +1,78 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Rate limiting and user management for the Loomi Clothing Detection API.
3
+ """
4
+ import asyncio
5
+ import time
6
+ from typing import Dict, List
7
+ from collections import defaultdict
8
+ from dataclasses import dataclass
9
+ from config import config
10
+
11
+ @dataclass
12
+ class UserRequest:
13
+ """Represents a user request for tracking."""
14
+ timestamp: float
15
+ endpoint: str
16
+ file_size: int
17
+
18
+ class RateLimiter:
19
+ """Manages rate limiting and concurrent request tracking per user."""
20
+
21
+ def __init__(self):
22
+ self.user_requests: Dict[str, List[UserRequest]] = defaultdict(list)
23
+ self.user_concurrent: Dict[str, int] = defaultdict(int)
24
+ self.lock = asyncio.Lock()
25
+
26
+ async def check_rate_limit(self, user_id: str, endpoint: str) -> bool:
27
+ """Check if user has exceeded rate limit."""
28
+ async with self.lock:
29
+ now = time.time()
30
+ user_reqs = self.user_requests[user_id]
31
+
32
+ # Remove old requests outside the window
33
+ user_reqs = [req for req in user_reqs if now - req.timestamp < config.rate_limit_window]
34
+ self.user_requests[user_id] = user_reqs
35
+
36
+ # Check rate limit
37
+ return len(user_reqs) < config.rate_limit_requests
38
+
39
+ async def check_concurrent_limit(self, user_id: str) -> bool:
40
+ """Check if user has exceeded concurrent request limit."""
41
+ async with self.lock:
42
+ return self.user_concurrent[user_id] < config.max_concurrent_requests
43
+
44
+ async def add_request(self, user_id: str, endpoint: str, file_size: int):
45
+ """Add a new request to user's history."""
46
+ async with self.lock:
47
+ now = time.time()
48
+ self.user_requests[user_id].append(UserRequest(now, endpoint, file_size))
49
+ self.user_concurrent[user_id] += 1
50
+
51
+ async def remove_request(self, user_id: str):
52
+ """Remove a completed request from concurrent count."""
53
+ async with self.lock:
54
+ if self.user_concurrent[user_id] > 0:
55
+ self.user_concurrent[user_id] -= 1
56
+
57
+ def get_user_stats(self, user_id: str) -> Dict:
58
+ """Get user statistics for API responses."""
59
+ now = time.time()
60
+ user_reqs = self.user_requests[user_id]
61
+ concurrent = self.user_concurrent[user_id]
62
+
63
+ # Calculate usage in current window
64
+ window_start = now - config.rate_limit_window
65
+ requests_in_window = len([req for req in user_reqs if req.timestamp >= window_start])
66
+
67
+ return {
68
+ "user_id": user_id,
69
+ "requests_in_window": requests_in_window,
70
+ "requests_limit": config.rate_limit_requests,
71
+ "concurrent_requests": concurrent,
72
+ "concurrent_limit": config.max_concurrent_requests,
73
+ "window_remaining": config.rate_limit_window - (now - window_start),
74
+ "total_requests_today": len([req for req in user_reqs if req.timestamp >= now - 86400])
75
+ }
76
+
77
+ # Global rate limiter instance
78
+ rate_limiter = RateLimiter()
request_queue.py ADDED
@@ -0,0 +1,71 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Request queue management for the Loomi Clothing Detection API.
3
+ """
4
+ import asyncio
5
+ import logging
6
+ from typing import Any, Callable
7
+ from config import config
8
+
9
+ logger = logging.getLogger(__name__)
10
+
11
class RequestQueue:
    """Manages background processing of heavy API requests.

    Tasks are submitted as (coroutine function, args) pairs and executed by a
    pool of asyncio worker tasks. Callers await a future that is resolved with
    the callable's result, or with its exception.
    """

    def __init__(self):
        # FIFO queue of (user_id, endpoint, process_func, args, future) tuples;
        # a None item is the shutdown sentinel.
        self.queue = asyncio.Queue()
        # Kept for backward compatibility with external readers; not used here.
        self.processing = False
        # asyncio.Task objects for the spawned workers.
        self.workers = []

    async def start_workers(self, num_workers: int = None):
        """Start background workers.

        Args:
            num_workers: How many workers to spawn; defaults to
                ``config.num_workers`` when None.
        """
        if num_workers is None:
            num_workers = config.num_workers

        for i in range(num_workers):
            worker = asyncio.create_task(self._worker(f"worker-{i}"))
            self.workers.append(worker)
        logging.getLogger(__name__).info(f"Started {num_workers} background workers")

    async def _worker(self, name: str):
        """Background worker loop: process queued tasks until shutdown."""
        log = logging.getLogger(__name__)
        log.info(f"Worker {name} started")
        while True:
            try:
                task = await self.queue.get()
                if task is None:  # Shutdown signal
                    # BUG FIX: acknowledge the sentinel too, so a later
                    # queue.join() cannot hang on an unfinished item.
                    self.queue.task_done()
                    break

                user_id, endpoint, process_func, args, future = task
                try:
                    # Process the request and hand the result to the waiter.
                    result = await process_func(*args)
                    if not future.cancelled():
                        future.set_result(result)
                except Exception as e:
                    if not future.cancelled():
                        future.set_exception(e)
                finally:
                    self.queue.task_done()

            except asyncio.CancelledError:
                # Let task cancellation propagate so cancel()-based shutdown works.
                raise
            except Exception as e:
                log.error(f"Worker {name} error: {e}")

    async def submit_task(self, user_id: str, endpoint: str, process_func: Callable, *args) -> Any:
        """Submit a task to the queue and await its result."""
        # Create the future on the running loop: bare asyncio.Future() relies
        # on the deprecated implicit event-loop lookup.
        future = asyncio.get_running_loop().create_future()
        await self.queue.put((user_id, endpoint, process_func, args, future))
        return await future

    async def shutdown(self):
        """Shutdown workers gracefully (one sentinel per worker, then wait)."""
        for _ in self.workers:
            await self.queue.put(None)
        await asyncio.gather(*self.workers, return_exceptions=True)

    def get_status(self) -> dict:
        """Get current queue status for health checks."""
        return {
            "queue_size": self.queue.qsize(),
            # Count only workers whose tasks are still running, so the health
            # check does not report already-finished workers as active.
            "active_workers": sum(1 for w in self.workers if not w.done())
        }
69
+
70
# Global request queue instance shared by all request handlers.
request_queue = RequestQueue()
requirements.txt CHANGED
@@ -1,12 +1,28 @@
 
1
  fastapi
2
  uvicorn[standard]
3
- pillow
4
  python-multipart
5
- numpy
6
- scikit-learn
 
7
  opencv-python-headless
 
 
 
8
  transformers
9
  torch
10
  torchvision
11
- rembg
12
- onnxruntime
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Core FastAPI dependencies
2
  fastapi
3
  uvicorn[standard]
 
4
  python-multipart
5
+
6
+ # Image processing
7
+ pillow
8
  opencv-python-headless
9
+ rembg
10
+
11
+ # Machine Learning
12
  transformers
13
  torch
14
  torchvision
15
+ onnxruntime
16
+
17
+ # Data processing
18
+ numpy
19
+ scikit-learn
20
+
21
+ # Configuration and utilities
22
+ python-dotenv
23
+
24
+ # Optional dependencies (uncomment if needed)
25
+ # redis # For Redis caching
26
+ # celery # For task queue
27
+ # prometheus-client # For metrics
28
+ # structlog # For structured logging
start.py ADDED
@@ -0,0 +1,82 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """
3
+ Startup script for Loomi Clothing Detection API
4
+ Supports different deployment scenarios and configurations
5
+ """
6
+
7
+ import os
8
+ import sys
9
+ import argparse
10
+ import uvicorn
11
+ from config import config
12
+
13
def main():
    """Parse CLI arguments, apply optional .env overrides, and run uvicorn.

    Exits with status 1 if the server fails to start.
    """
    # Local alias for the configuration object. BUG FIX: the previous code did
    # `import config` and `config = config.config` inside main(), which made
    # `config` a local variable for the whole function body — so the argparse
    # defaults below raised UnboundLocalError on every invocation.
    cfg = config

    parser = argparse.ArgumentParser(description="Start Loomi Clothing Detection API")
    parser.add_argument("--host", default=cfg.host, help="Host to bind to")
    parser.add_argument("--port", type=int, default=cfg.port, help="Port to bind to")
    parser.add_argument("--workers", type=int, default=cfg.workers, help="Number of worker processes")
    parser.add_argument("--reload", action="store_true", help="Enable auto-reload for development")
    parser.add_argument("--config", help="Path to .env file")
    parser.add_argument("--huggingface", action="store_true", help="Optimize for Hugging Face Spaces")

    args = parser.parse_args()

    # Load environment variables if specified, then re-read the config module
    # so it picks up the new environment.
    if args.config:
        from dotenv import load_dotenv
        load_dotenv(args.config)
        import importlib
        import config as config_module
        importlib.reload(config_module)
        cfg = config_module.config

    # Hugging Face Spaces optimization: single process, no reload.
    if args.huggingface:
        print("🚀 Optimizing for Hugging Face Spaces...")
        os.environ["WORKERS"] = "1"
        os.environ["NUM_WORKERS"] = "1"
        os.environ["MODEL_WARMUP_ON_STARTUP"] = "true"
        args.workers = 1
        args.reload = False

    # Validate configuration and surface (non-fatal) warnings.
    warnings = cfg.validate()
    if warnings:
        print("⚠️ Configuration warnings:")
        for warning in warnings:
            print(f" - {warning}")
        print()

    # Print startup information
    print("🎯 Loomi Clothing Detection API")
    print(f" Version: {cfg.version}")
    print(f" Host: {args.host}:{args.port}")
    print(f" Workers: {args.workers}")
    print(f" Background workers: {cfg.num_workers}")
    print(f" Rate limit: {cfg.rate_limit_requests} req/min")
    print(f" Concurrent limit: {cfg.max_concurrent_requests}")
    print(f" File size limit: {cfg.max_upload_mb}MB")
    print(f" Hugging Face Space: {cfg.is_huggingface_space}")
    print()

    # Start the server. With --reload, uvicorn requires a single worker.
    try:
        uvicorn.run(
            "main:app",
            host=args.host,
            port=args.port,
            workers=args.workers if not args.reload else 1,
            reload=args.reload,
            log_level=cfg.log_level.lower(),
            access_log=True,
            use_colors=True
        )
    except KeyboardInterrupt:
        print("\n👋 Shutting down gracefully...")
    except Exception as e:
        print(f"❌ Error starting server: {e}")
        sys.exit(1)

if __name__ == "__main__":
    main()
user_manager.py ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ User management and identification for the Loomi Clothing Detection API.
3
+ """
4
+ import hashlib
5
+ from typing import Optional
6
+ from fastapi import Request, Depends
7
+ from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
8
+
9
+ # Security
10
+ security = HTTPBearer(auto_error=False)
11
+
12
def get_user_id(request: Request, credentials: Optional[HTTPAuthorizationCredentials] = Depends(security)) -> str:
    """
    Extract a stable user identifier from the request.

    With a bearer token the ID is derived from the token text; otherwise an
    anonymous ID is derived from client IP + User-Agent. MD5 is used here only
    as a short non-cryptographic fingerprint, not for security.
    In production, validate the JWT and extract the real user_id instead.
    """
    if credentials:
        # In production, decode JWT and extract user_id
        return f"user_{hashlib.md5(credentials.credentials.encode()).hexdigest()[:8]}"

    # Fallback: use IP address + User-Agent hash.
    # BUG FIX: request.client can be None (e.g. some test clients or unusual
    # ASGI servers), which previously raised AttributeError.
    client_ip = request.client.host if request.client else "unknown"
    user_agent = request.headers.get("user-agent", "")
    user_hash = hashlib.md5(f"{client_ip}:{user_agent}".encode()).hexdigest()[:8]
    return f"anon_{user_hash}"