Commit Β·
2c97e18
0
Parent(s):
Initial commit
Browse files- .dockerignore +140 -0
- .gitattributes +41 -0
- .github/workflows/docker.yml +110 -0
- .gitignore +190 -0
- .python-version +1 -0
- Dockerfile +67 -0
- LICENSE +21 -0
- README.md +11 -0
- app/api/admin/manage.py +1016 -0
- app/api/v1/chat.py +107 -0
- app/api/v1/images.py +53 -0
- app/api/v1/models.py +114 -0
- app/core/auth.py +66 -0
- app/core/config.py +243 -0
- app/core/exception.py +119 -0
- app/core/logger.py +141 -0
- app/core/proxy_pool.py +170 -0
- app/core/storage.py +644 -0
- app/models/grok_models.py +163 -0
- app/models/openai_schema.py +106 -0
- app/services/api_keys.py +226 -0
- app/services/grok/cache.py +243 -0
- app/services/grok/client.py +386 -0
- app/services/grok/create.py +140 -0
- app/services/grok/processer.py +430 -0
- app/services/grok/statsig.py +82 -0
- app/services/grok/token.py +649 -0
- app/services/grok/upload.py +250 -0
- app/services/images/normalize.py +100 -0
- app/services/mcp/__init__.py +6 -0
- app/services/mcp/server.py +63 -0
- app/services/mcp/tools.py +77 -0
- app/services/request_logger.py +152 -0
- app/services/request_stats.py +205 -0
- app/template/admin.html +0 -0
- app/template/favicon.png +3 -0
- app/template/login.html +76 -0
- data/setting.toml +25 -0
- data/temp/image.temp +0 -0
- data/token.json +4 -0
- docker-compose.yml +25 -0
- docker-entrypoint.sh +74 -0
- main.py +196 -0
- pyproject.toml +26 -0
- readme.md +248 -0
- requirements.txt +18 -0
- test/test_concurrency.py +276 -0
- test/test_concurrency.sh +177 -0
- test_key.py +50 -0
- uv.lock +0 -0
.dockerignore
ADDED
|
@@ -0,0 +1,140 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Python runtime and cache files
|
| 2 |
+
__pycache__/
|
| 3 |
+
*.py[cod]
|
| 4 |
+
*$py.class
|
| 5 |
+
*.so
|
| 6 |
+
.Python
|
| 7 |
+
*.egg-info/
|
| 8 |
+
dist/
|
| 9 |
+
build/
|
| 10 |
+
*.egg
|
| 11 |
+
.eggs/
|
| 12 |
+
pip-log.txt
|
| 13 |
+
pip-delete-this-directory.txt
|
| 14 |
+
|
| 15 |
+
# Virtual environments
|
| 16 |
+
venv/
|
| 17 |
+
env/
|
| 18 |
+
ENV/
|
| 19 |
+
.venv/
|
| 20 |
+
.virtualenv/
|
| 21 |
+
pipenv/
|
| 22 |
+
poetry.lock
|
| 23 |
+
Pipfile.lock
|
| 24 |
+
|
| 25 |
+
# IDEs and editors
|
| 26 |
+
.vscode/
|
| 27 |
+
.idea/
|
| 28 |
+
*.swp
|
| 29 |
+
*.swo
|
| 30 |
+
*.swn
|
| 31 |
+
.DS_Store
|
| 32 |
+
*~
|
| 33 |
+
.project
|
| 34 |
+
.classpath
|
| 35 |
+
.settings/
|
| 36 |
+
*.sublime-project
|
| 37 |
+
*.sublime-workspace
|
| 38 |
+
|
| 39 |
+
# Git
|
| 40 |
+
.git/
|
| 41 |
+
.gitignore
|
| 42 |
+
.gitattributes
|
| 43 |
+
.gitmodules
|
| 44 |
+
|
| 45 |
+
# Docker related
|
| 46 |
+
Dockerfile*
|
| 47 |
+
docker-compose*.yml
|
| 48 |
+
.dockerignore
|
| 49 |
+
.docker/
|
| 50 |
+
|
| 51 |
+
# Documentation
|
| 52 |
+
*.md
|
| 53 |
+
!data/*.md
|
| 54 |
+
README.md
|
| 55 |
+
docs/
|
| 56 |
+
LICENSE
|
| 57 |
+
CHANGELOG*
|
| 58 |
+
CONTRIBUTING*
|
| 59 |
+
|
| 60 |
+
# Tests and quality checks
|
| 61 |
+
tests/
|
| 62 |
+
test/
|
| 63 |
+
*.pytest_cache/
|
| 64 |
+
.coverage
|
| 65 |
+
.coverage.*
|
| 66 |
+
htmlcov/
|
| 67 |
+
.tox/
|
| 68 |
+
.mypy_cache/
|
| 69 |
+
.ruff_cache/
|
| 70 |
+
.pytest_cache/
|
| 71 |
+
.hypothesis/
|
| 72 |
+
.pytype/
|
| 73 |
+
coverage.xml
|
| 74 |
+
*.cover
|
| 75 |
+
.cache/
|
| 76 |
+
nosetests.xml
|
| 77 |
+
|
| 78 |
+
# Logs and runtime data
|
| 79 |
+
logs/
|
| 80 |
+
*.log
|
| 81 |
+
*.log.*
|
| 82 |
+
|
| 83 |
+
# Temporary data files
|
| 84 |
+
data/temp/
|
| 85 |
+
data/token.json
|
| 86 |
+
|
| 87 |
+
# Database files
|
| 88 |
+
*.db
|
| 89 |
+
*.sqlite
|
| 90 |
+
*.sqlite3
|
| 91 |
+
|
| 92 |
+
# Temporary files
|
| 93 |
+
tmp/
|
| 94 |
+
temp/
|
| 95 |
+
*.tmp
|
| 96 |
+
*.temp
|
| 97 |
+
*.bak
|
| 98 |
+
*.orig
|
| 99 |
+
*.rej
|
| 100 |
+
*.swp
|
| 101 |
+
|
| 102 |
+
# CI/CD
|
| 103 |
+
.github/
|
| 104 |
+
.gitlab-ci.yml
|
| 105 |
+
.travis.yml
|
| 106 |
+
.circleci/
|
| 107 |
+
azure-pipelines.yml
|
| 108 |
+
.jenkins/
|
| 109 |
+
Jenkinsfile
|
| 110 |
+
|
| 111 |
+
# Environment variables and secrets
|
| 112 |
+
.env
|
| 113 |
+
.env.*
|
| 114 |
+
*.key
|
| 115 |
+
*.pem
|
| 116 |
+
*.crt
|
| 117 |
+
secrets/
|
| 118 |
+
|
| 119 |
+
# Media and large files
|
| 120 |
+
*.mp4
|
| 121 |
+
*.avi
|
| 122 |
+
*.mov
|
| 123 |
+
*.zip
|
| 124 |
+
*.tar
|
| 125 |
+
*.tar.gz
|
| 126 |
+
*.rar
|
| 127 |
+
|
| 128 |
+
# Node.js
|
| 129 |
+
node_modules/
|
| 130 |
+
npm-debug.log*
|
| 131 |
+
yarn-debug.log*
|
| 132 |
+
yarn-error.log*
|
| 133 |
+
package-lock.json
|
| 134 |
+
yarn.lock
|
| 135 |
+
|
| 136 |
+
# Other
|
| 137 |
+
*.pyc
|
| 138 |
+
.Python
|
| 139 |
+
.sass-cache/
|
| 140 |
+
.ipynb_checkpoints/
|
.gitattributes
ADDED
|
@@ -0,0 +1,41 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Auto detect text files and perform LF normalization
|
| 2 |
+
* text=auto
|
| 3 |
+
*.7z filter=lfs diff=lfs merge=lfs -text
|
| 4 |
+
*.arrow filter=lfs diff=lfs merge=lfs -text
|
| 5 |
+
*.bin filter=lfs diff=lfs merge=lfs -text
|
| 6 |
+
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
| 7 |
+
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
| 8 |
+
*.ftz filter=lfs diff=lfs merge=lfs -text
|
| 9 |
+
*.gz filter=lfs diff=lfs merge=lfs -text
|
| 10 |
+
*.h5 filter=lfs diff=lfs merge=lfs -text
|
| 11 |
+
*.joblib filter=lfs diff=lfs merge=lfs -text
|
| 12 |
+
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
| 13 |
+
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
| 14 |
+
*.model filter=lfs diff=lfs merge=lfs -text
|
| 15 |
+
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
| 16 |
+
*.npy filter=lfs diff=lfs merge=lfs -text
|
| 17 |
+
*.npz filter=lfs diff=lfs merge=lfs -text
|
| 18 |
+
*.onnx filter=lfs diff=lfs merge=lfs -text
|
| 19 |
+
*.ot filter=lfs diff=lfs merge=lfs -text
|
| 20 |
+
*.parquet filter=lfs diff=lfs merge=lfs -text
|
| 21 |
+
*.pb filter=lfs diff=lfs merge=lfs -text
|
| 22 |
+
*.pickle filter=lfs diff=lfs merge=lfs -text
|
| 23 |
+
*.pkl filter=lfs diff=lfs merge=lfs -text
|
| 24 |
+
*.pt filter=lfs diff=lfs merge=lfs -text
|
| 25 |
+
*.pth filter=lfs diff=lfs merge=lfs -text
|
| 26 |
+
*.rar filter=lfs diff=lfs merge=lfs -text
|
| 27 |
+
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
| 28 |
+
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
| 29 |
+
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
| 30 |
+
*.tar filter=lfs diff=lfs merge=lfs -text
|
| 31 |
+
*.tflite filter=lfs diff=lfs merge=lfs -text
|
| 32 |
+
*.tgz filter=lfs diff=lfs merge=lfs -text
|
| 33 |
+
*.wasm filter=lfs diff=lfs merge=lfs -text
|
| 34 |
+
*.xz filter=lfs diff=lfs merge=lfs -text
|
| 35 |
+
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 36 |
+
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 37 |
+
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
| 38 |
+
*.png filter=lfs diff=lfs merge=lfs -text
|
| 39 |
+
*.mp4 filter=lfs diff=lfs merge=lfs -text
|
| 40 |
+
app/template/favicon.png filter=lfs diff=lfs merge=lfs -text
|
| 41 |
+
data/temp/video/*.mp4 filter=lfs diff=lfs merge=lfs -text
|
.github/workflows/docker.yml
ADDED
|
@@ -0,0 +1,110 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
name: Build Docker Image
|
| 2 |
+
|
| 3 |
+
on:
|
| 4 |
+
push:
|
| 5 |
+
branches:
|
| 6 |
+
- main
|
| 7 |
+
tags:
|
| 8 |
+
- 'v*'
|
| 9 |
+
pull_request:
|
| 10 |
+
branches:
|
| 11 |
+
- main
|
| 12 |
+
|
| 13 |
+
env:
|
| 14 |
+
REGISTRY: ghcr.io
|
| 15 |
+
IMAGE_NAME: ${{ github.repository }}
|
| 16 |
+
|
| 17 |
+
jobs:
|
| 18 |
+
build-and-push:
|
| 19 |
+
runs-on: ubuntu-latest
|
| 20 |
+
permissions:
|
| 21 |
+
contents: read
|
| 22 |
+
packages: write
|
| 23 |
+
id-token: write
|
| 24 |
+
|
| 25 |
+
strategy:
|
| 26 |
+
fail-fast: false
|
| 27 |
+
matrix:
|
| 28 |
+
include:
|
| 29 |
+
# AMD64 platform
|
| 30 |
+
- platform: linux/amd64
|
| 31 |
+
suffix: amd64
|
| 32 |
+
# ARM64 platform
|
| 33 |
+
- platform: linux/arm64
|
| 34 |
+
suffix: arm64
|
| 35 |
+
|
| 36 |
+
steps:
|
| 37 |
+
- name: Checkout repository
|
| 38 |
+
uses: actions/checkout@v4
|
| 39 |
+
|
| 40 |
+
- name: Set up Docker Buildx
|
| 41 |
+
uses: docker/setup-buildx-action@v3
|
| 42 |
+
|
| 43 |
+
- name: Log in to Container Registry
|
| 44 |
+
uses: docker/login-action@v3
|
| 45 |
+
with:
|
| 46 |
+
registry: ${{ env.REGISTRY }}
|
| 47 |
+
username: ${{ github.actor }}
|
| 48 |
+
password: ${{ secrets.GITHUB_TOKEN }}
|
| 49 |
+
|
| 50 |
+
- name: Extract metadata
|
| 51 |
+
id: meta
|
| 52 |
+
uses: docker/metadata-action@v5
|
| 53 |
+
with:
|
| 54 |
+
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
|
| 55 |
+
tags: |
|
| 56 |
+
# Branch push
|
| 57 |
+
type=ref,event=branch,suffix=-${{ matrix.suffix }}
|
| 58 |
+
# Semver: full version
|
| 59 |
+
type=semver,pattern={{version}},suffix=-${{ matrix.suffix }}
|
| 60 |
+
# latest tag
|
| 61 |
+
type=raw,value=latest-${{ matrix.suffix }},enable={{is_default_branch}}
|
| 62 |
+
|
| 63 |
+
- name: Build and push Docker image
|
| 64 |
+
uses: docker/build-push-action@v5
|
| 65 |
+
with:
|
| 66 |
+
context: .
|
| 67 |
+
platforms: ${{ matrix.platform }}
|
| 68 |
+
push: ${{ github.event_name != 'pull_request' }}
|
| 69 |
+
tags: ${{ steps.meta.outputs.tags }}
|
| 70 |
+
labels: ${{ steps.meta.outputs.labels }}
|
| 71 |
+
cache-from: type=gha,scope=${{ matrix.suffix }}
|
| 72 |
+
cache-to: type=gha,mode=max,scope=${{ matrix.suffix }}
|
| 73 |
+
pull: true
|
| 74 |
+
|
| 75 |
+
# Merge multi-arch images into a unified tag
|
| 76 |
+
merge-manifests:
|
| 77 |
+
runs-on: ubuntu-latest
|
| 78 |
+
needs: build-and-push
|
| 79 |
+
if: github.event_name != 'pull_request'
|
| 80 |
+
permissions:
|
| 81 |
+
contents: read
|
| 82 |
+
packages: write
|
| 83 |
+
|
| 84 |
+
steps:
|
| 85 |
+
- name: Log in to Container Registry
|
| 86 |
+
uses: docker/login-action@v3
|
| 87 |
+
with:
|
| 88 |
+
registry: ${{ env.REGISTRY }}
|
| 89 |
+
username: ${{ github.actor }}
|
| 90 |
+
password: ${{ secrets.GITHUB_TOKEN }}
|
| 91 |
+
|
| 92 |
+
- name: Extract metadata
|
| 93 |
+
id: meta
|
| 94 |
+
uses: docker/metadata-action@v5
|
| 95 |
+
with:
|
| 96 |
+
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
|
| 97 |
+
tags: |
|
| 98 |
+
type=ref,event=branch
|
| 99 |
+
type=semver,pattern={{version}}
|
| 100 |
+
type=raw,value=latest,enable={{is_default_branch}}
|
| 101 |
+
|
| 102 |
+
- name: Create and push manifest
|
| 103 |
+
run: |
|
| 104 |
+
TAGS="${{ steps.meta.outputs.tags }}"
|
| 105 |
+
for tag in $TAGS; do
|
| 106 |
+
echo "Merging tag: $tag"
|
| 107 |
+
docker buildx imagetools create -t $tag \
|
| 108 |
+
${tag}-amd64 \
|
| 109 |
+
${tag}-arm64
|
| 110 |
+
done
|
.gitignore
ADDED
|
@@ -0,0 +1,190 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Byte-compiled / optimized / DLL files
|
| 2 |
+
__pycache__/
|
| 3 |
+
*.py[cod]
|
| 4 |
+
*$py.class
|
| 5 |
+
.idea
|
| 6 |
+
|
| 7 |
+
# C extensions
|
| 8 |
+
*.so
|
| 9 |
+
|
| 10 |
+
# Distribution / packaging
|
| 11 |
+
.Python
|
| 12 |
+
build/
|
| 13 |
+
develop-eggs/
|
| 14 |
+
dist/
|
| 15 |
+
downloads/
|
| 16 |
+
eggs/
|
| 17 |
+
.eggs/
|
| 18 |
+
lib/
|
| 19 |
+
lib64/
|
| 20 |
+
parts/
|
| 21 |
+
sdist/
|
| 22 |
+
var/
|
| 23 |
+
wheels/
|
| 24 |
+
share/python-wheels/
|
| 25 |
+
*.egg-info/
|
| 26 |
+
.installed.cfg
|
| 27 |
+
*.egg
|
| 28 |
+
MANIFEST
|
| 29 |
+
|
| 30 |
+
# PyInstaller
|
| 31 |
+
# Usually these files are written by a python script from a template
|
| 32 |
+
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
| 33 |
+
*.manifest
|
| 34 |
+
*.spec
|
| 35 |
+
|
| 36 |
+
# Installer logs
|
| 37 |
+
pip-log.txt
|
| 38 |
+
pip-delete-this-directory.txt
|
| 39 |
+
|
| 40 |
+
# Unit test / coverage reports
|
| 41 |
+
htmlcov/
|
| 42 |
+
.tox/
|
| 43 |
+
.nox/
|
| 44 |
+
.coverage
|
| 45 |
+
.coverage.*
|
| 46 |
+
.cache
|
| 47 |
+
nosetests.xml
|
| 48 |
+
coverage.xml
|
| 49 |
+
*.cover
|
| 50 |
+
*.py,cover
|
| 51 |
+
.hypothesis/
|
| 52 |
+
.pytest_cache/
|
| 53 |
+
cover/
|
| 54 |
+
|
| 55 |
+
# Translations
|
| 56 |
+
*.mo
|
| 57 |
+
*.pot
|
| 58 |
+
|
| 59 |
+
# Django stuff:
|
| 60 |
+
*.log
|
| 61 |
+
local_settings.py
|
| 62 |
+
db.sqlite3
|
| 63 |
+
db.sqlite3-journal
|
| 64 |
+
|
| 65 |
+
# Flask stuff:
|
| 66 |
+
instance/
|
| 67 |
+
.webassets-cache
|
| 68 |
+
|
| 69 |
+
# Scrapy stuff:
|
| 70 |
+
.scrapy
|
| 71 |
+
|
| 72 |
+
# Sphinx documentation
|
| 73 |
+
docs/_build/
|
| 74 |
+
|
| 75 |
+
# PyBuilder
|
| 76 |
+
.pybuilder/
|
| 77 |
+
target/
|
| 78 |
+
|
| 79 |
+
# Jupyter Notebook
|
| 80 |
+
.ipynb_checkpoints
|
| 81 |
+
|
| 82 |
+
# IPython
|
| 83 |
+
profile_default/
|
| 84 |
+
ipython_config.py
|
| 85 |
+
|
| 86 |
+
# pyenv
|
| 87 |
+
# For a library or package, you might want to ignore these files since the code is
|
| 88 |
+
# intended to run in multiple environments; otherwise, check them in:
|
| 89 |
+
# .python-version
|
| 90 |
+
|
| 91 |
+
# pipenv
|
| 92 |
+
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
|
| 93 |
+
# However, in case of collaboration, if having platform-specific dependencies or dependencies
|
| 94 |
+
# having no cross-platform support, pipenv may install dependencies that don't work, or not
|
| 95 |
+
# install all needed dependencies.
|
| 96 |
+
#Pipfile.lock
|
| 97 |
+
|
| 98 |
+
# UV
|
| 99 |
+
# Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
|
| 100 |
+
# This is especially recommended for binary packages to ensure reproducibility, and is more
|
| 101 |
+
# commonly ignored for libraries.
|
| 102 |
+
#uv.lock
|
| 103 |
+
|
| 104 |
+
# poetry
|
| 105 |
+
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
|
| 106 |
+
# This is especially recommended for binary packages to ensure reproducibility, and is more
|
| 107 |
+
# commonly ignored for libraries.
|
| 108 |
+
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
|
| 109 |
+
#poetry.lock
|
| 110 |
+
|
| 111 |
+
# pdm
|
| 112 |
+
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
|
| 113 |
+
#pdm.lock
|
| 114 |
+
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
|
| 115 |
+
# in version control.
|
| 116 |
+
# https://pdm.fming.dev/latest/usage/project/#working-with-version-control
|
| 117 |
+
.pdm.toml
|
| 118 |
+
.pdm-python
|
| 119 |
+
.pdm-build/
|
| 120 |
+
|
| 121 |
+
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
|
| 122 |
+
__pypackages__/
|
| 123 |
+
|
| 124 |
+
# Celery stuff
|
| 125 |
+
celerybeat-schedule
|
| 126 |
+
celerybeat.pid
|
| 127 |
+
|
| 128 |
+
# SageMath parsed files
|
| 129 |
+
*.sage.py
|
| 130 |
+
|
| 131 |
+
# Environments
|
| 132 |
+
.env
|
| 133 |
+
.venv
|
| 134 |
+
env/
|
| 135 |
+
venv/
|
| 136 |
+
ENV/
|
| 137 |
+
env.bak/
|
| 138 |
+
venv.bak/
|
| 139 |
+
|
| 140 |
+
logs/*
|
| 141 |
+
|
| 142 |
+
# Runtime data
|
| 143 |
+
data/*
|
| 144 |
+
|
| 145 |
+
# Spyder project settings
|
| 146 |
+
.spyderproject
|
| 147 |
+
.spyproject
|
| 148 |
+
|
| 149 |
+
# Rope project settings
|
| 150 |
+
.ropeproject
|
| 151 |
+
|
| 152 |
+
# mkdocs documentation
|
| 153 |
+
/site
|
| 154 |
+
|
| 155 |
+
# mypy
|
| 156 |
+
.mypy_cache/
|
| 157 |
+
.dmypy.json
|
| 158 |
+
dmypy.json
|
| 159 |
+
|
| 160 |
+
# Pyre type checker
|
| 161 |
+
.pyre/
|
| 162 |
+
|
| 163 |
+
# pytype static type analyzer
|
| 164 |
+
.pytype/
|
| 165 |
+
|
| 166 |
+
# Cython debug symbols
|
| 167 |
+
cython_debug/
|
| 168 |
+
|
| 169 |
+
# PyCharm
|
| 170 |
+
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
|
| 171 |
+
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
|
| 172 |
+
# and can be added to the global gitignore or merged into this file. For a more nuclear
|
| 173 |
+
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
|
| 174 |
+
#.idea/
|
| 175 |
+
|
| 176 |
+
# Ruff stuff:
|
| 177 |
+
.ruff_cache/
|
| 178 |
+
|
| 179 |
+
# PyPI configuration file
|
| 180 |
+
.pypirc
|
| 181 |
+
|
| 182 |
+
# Cursor
|
| 183 |
+
# Cursor is an AI-powered code editor.`.cursorignore` specifies files/directories to
|
| 184 |
+
# exclude from AI features like autocomplete and code analysis. Recommended for sensitive data
|
| 185 |
+
# refer to https://docs.cursor.com/context/ignore-files
|
| 186 |
+
.cursorignore
|
| 187 |
+
.cursorindexingignore
|
| 188 |
+
|
| 189 |
+
# temp videos
|
| 190 |
+
data/temp/video/
|
.python-version
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
3.13
|
Dockerfile
ADDED
|
@@ -0,0 +1,67 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
FROM python:3.11-slim AS builder
|
| 2 |
+
|
| 3 |
+
WORKDIR /build
|
| 4 |
+
|
| 5 |
+
# Install system deps needed to build wheels
|
| 6 |
+
RUN apt-get update && \
|
| 7 |
+
apt-get install -y --no-install-recommends \
|
| 8 |
+
gcc \
|
| 9 |
+
g++ \
|
| 10 |
+
libffi-dev \
|
| 11 |
+
&& rm -rf /var/lib/apt/lists/*
|
| 12 |
+
|
| 13 |
+
# Install dependencies into a staging prefix
|
| 14 |
+
COPY requirements.txt .
|
| 15 |
+
RUN pip install --no-cache-dir --prefix=/install --compile -r requirements.txt
|
| 16 |
+
|
| 17 |
+
# Remove common test and cache artifacts
|
| 18 |
+
RUN find /install -type d -name "__pycache__" -exec rm -rf {} + 2>/dev/null || true && \
|
| 19 |
+
find /install -type d -name "tests" -exec rm -rf {} + 2>/dev/null || true && \
|
| 20 |
+
find /install -type d -name "test" -exec rm -rf {} + 2>/dev/null || true && \
|
| 21 |
+
find /install -type d -name "*.dist-info" -exec sh -c 'rm -f "$1"/RECORD "$1"/INSTALLER' _ {} \; && \
|
| 22 |
+
find /install -type f -name "*.pyc" -delete && \
|
| 23 |
+
find /install -type f -name "*.pyo" -delete && \
|
| 24 |
+
find /install -name "*.so" -exec strip --strip-unneeded {} \; 2>/dev/null || true
|
| 25 |
+
|
| 26 |
+
FROM python:3.11-slim AS runtime
|
| 27 |
+
|
| 28 |
+
WORKDIR /app
|
| 29 |
+
|
| 30 |
+
# Install runtime system deps only
|
| 31 |
+
RUN apt-get update && \
|
| 32 |
+
apt-get install -y --no-install-recommends \
|
| 33 |
+
libffi8 \
|
| 34 |
+
ca-certificates \
|
| 35 |
+
&& rm -rf /var/lib/apt/lists/* \
|
| 36 |
+
&& rm -rf /tmp/* /var/tmp/* \
|
| 37 |
+
&& rm -rf /usr/share/doc/* \
|
| 38 |
+
&& rm -rf /usr/share/man/* \
|
| 39 |
+
&& rm -rf /var/cache/apt/*
|
| 40 |
+
|
| 41 |
+
# Copy dependencies from builder stage
|
| 42 |
+
COPY --from=builder /install /usr/local
|
| 43 |
+
|
| 44 |
+
# Create required directories
|
| 45 |
+
# RUN mkdir -p /app/logs /app/data/temp/image /app/data/temp/video
|
| 46 |
+
RUN mkdir -p /app/logs
|
| 47 |
+
|
| 48 |
+
# Copy application code and config
|
| 49 |
+
COPY app/ ./app/
|
| 50 |
+
COPY main.py .
|
| 51 |
+
|
| 52 |
+
# Copy and set entrypoint script
|
| 53 |
+
COPY docker-entrypoint.sh /usr/local/bin/
|
| 54 |
+
RUN chmod +x /usr/local/bin/docker-entrypoint.sh
|
| 55 |
+
|
| 56 |
+
# Disable Python bytecode and enable unbuffered output
|
| 57 |
+
ENV PYTHONDONTWRITEBYTECODE=1 \
|
| 58 |
+
PYTHONUNBUFFERED=1
|
| 59 |
+
|
| 60 |
+
EXPOSE 8000
|
| 61 |
+
|
| 62 |
+
# Initialize config via entrypoint
|
| 63 |
+
ENTRYPOINT ["docker-entrypoint.sh"]
|
| 64 |
+
|
| 65 |
+
# Default command
|
| 66 |
+
CMD ["sh", "-c", "uvicorn main:app --host 0.0.0.0 --port ${PORT:-7860}"]
|
| 67 |
+
|
LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
MIT License
|
| 2 |
+
|
| 3 |
+
Copyright (c) 2025 Chenyme
|
| 4 |
+
|
| 5 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
| 6 |
+
of this software and associated documentation files (the "Software"), to deal
|
| 7 |
+
in the Software without restriction, including without limitation the rights
|
| 8 |
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
| 9 |
+
copies of the Software, and to permit persons to whom the Software is
|
| 10 |
+
furnished to do so, subject to the following conditions:
|
| 11 |
+
|
| 12 |
+
The above copyright notice and this permission notice shall be included in all
|
| 13 |
+
copies or substantial portions of the Software.
|
| 14 |
+
|
| 15 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
| 16 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
| 17 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
| 18 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
| 19 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
| 20 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
| 21 |
+
SOFTWARE.
|
README.md
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
title: Grok2api Private
|
| 3 |
+
emoji: π
|
| 4 |
+
colorFrom: blue
|
| 5 |
+
colorTo: gray
|
| 6 |
+
sdk: docker
|
| 7 |
+
pinned: false
|
| 8 |
+
short_description: Grok2API private deploy
|
| 9 |
+
---
|
| 10 |
+
|
| 11 |
+
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
app/api/admin/manage.py
ADDED
|
@@ -0,0 +1,1016 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Admin API - token management and system settings"""
|
| 2 |
+
|
| 3 |
+
import secrets
|
| 4 |
+
import time
|
| 5 |
+
from typing import Dict, Any, List, Optional
|
| 6 |
+
from datetime import datetime, timedelta
|
| 7 |
+
from pathlib import Path
|
| 8 |
+
from fastapi import APIRouter, HTTPException, Depends, Header, Query
|
| 9 |
+
from fastapi.responses import HTMLResponse
|
| 10 |
+
from pydantic import BaseModel
|
| 11 |
+
|
| 12 |
+
from app.core.config import setting
|
| 13 |
+
from app.core.logger import logger
|
| 14 |
+
from app.services.grok.token import token_manager
|
| 15 |
+
from app.services.request_stats import request_stats
|
| 16 |
+
from app.models.grok_models import TokenType
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
router = APIRouter(tags=["admin"])
|
| 20 |
+
|
| 21 |
+
# Constants
|
| 22 |
+
STATIC_DIR = Path(__file__).parents[2] / "template"
|
| 23 |
+
TEMP_DIR = Path(__file__).parents[3] / "data" / "temp"
|
| 24 |
+
IMAGE_CACHE_DIR = TEMP_DIR / "image"
|
| 25 |
+
VIDEO_CACHE_DIR = TEMP_DIR / "video"
|
| 26 |
+
SESSION_EXPIRE_HOURS = 24
|
| 27 |
+
BYTES_PER_KB = 1024
|
| 28 |
+
BYTES_PER_MB = 1024 * 1024
|
| 29 |
+
|
| 30 |
+
# Session storage
|
| 31 |
+
_sessions: Dict[str, datetime] = {}
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
# === Request/Response models ===
|
| 35 |
+
|
| 36 |
+
class LoginRequest(BaseModel):
|
| 37 |
+
username: str
|
| 38 |
+
password: str
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
class LoginResponse(BaseModel):
|
| 42 |
+
success: bool
|
| 43 |
+
token: Optional[str] = None
|
| 44 |
+
message: str
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
class AddTokensRequest(BaseModel):
    """Batch token-add payload; token_type is "sso" or "ssoSuper"."""
    tokens: List[str]
    token_type: str
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
class DeleteTokensRequest(BaseModel):
    """Batch token-delete payload; token_type is "sso" or "ssoSuper"."""
    tokens: List[str]
    token_type: str
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
class TokenInfo(BaseModel):
    """One row in the admin token list (GET /api/tokens)."""
    token: str
    token_type: str  # "sso" (normal) or "ssoSuper" (super)
    created_time: Optional[int] = None  # creation timestamp when known
    # Quota counters; -1 means "not yet measured".
    remaining_queries: int
    heavy_remaining_queries: int
    status: str  # expired / cooldown / unused / exhausted / active
    tags: List[str] = []
    note: str = ""
    # 429 cooldown bookkeeping; cooldown_remaining is whole seconds, rounded up.
    cooldown_until: Optional[int] = None
    cooldown_remaining: int = 0
    last_failure_time: Optional[int] = None
    last_failure_reason: str = ""
    limit_reason: str = ""  # "" | "cooldown" | "exhausted"
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
class TokenListResponse(BaseModel):
    """Envelope for GET /api/tokens: all tokens plus the total count."""
    success: bool
    data: List[TokenInfo]
    total: int
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
class UpdateSettingsRequest(BaseModel):
    """Partial settings update; omitted sections are left unchanged."""
    global_config: Optional[Dict[str, Any]] = None
    grok_config: Optional[Dict[str, Any]] = None
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
class UpdateTokenTagsRequest(BaseModel):
    """Replace the tag list of a single token."""
    token: str
    token_type: str  # "sso" or "ssoSuper"
    tags: List[str]
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
class UpdateTokenNoteRequest(BaseModel):
    """Replace the free-form note of a single token."""
    token: str
    token_type: str  # "sso" or "ssoSuper"
    note: str
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
class TestTokenRequest(BaseModel):
    """Payload for POST /api/tokens/test (live availability probe)."""
    token: str
    token_type: str  # "sso" or "ssoSuper"
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
# === Helper functions ===
|
| 102 |
+
|
| 103 |
+
def validate_token_type(token_type_str: str) -> TokenType:
    """Map an API token-type string to a TokenType, or raise HTTP 400.

    Accepted values: "sso" -> TokenType.NORMAL, "ssoSuper" -> TokenType.SUPER.
    """
    mapping = {"sso": TokenType.NORMAL, "ssoSuper": TokenType.SUPER}
    token_type = mapping.get(token_type_str)
    if token_type is None:
        raise HTTPException(
            status_code=400,
            detail={"error": "Invalid token type", "code": "INVALID_TYPE"}
        )
    return token_type
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
def parse_created_time(created_time) -> Optional[int]:
    """Normalize a stored creation timestamp to an int (or None).

    Accepts the raw persisted value: an int, a numeric string, an empty
    string, or None. Returns the integer value, or None when the value is
    absent or unparseable.

    Fixes: the original crashed with ValueError on a non-numeric string,
    and accepted bool (a subclass of int) as a "timestamp".
    """
    if isinstance(created_time, bool):
        # bool passes isinstance(..., int) but is never a valid timestamp.
        return None
    if isinstance(created_time, int):
        return created_time
    if isinstance(created_time, str) and created_time:
        try:
            return int(created_time)
        except ValueError:
            # Corrupt value in storage: treat as unknown rather than 500.
            return None
    return None
|
| 120 |
+
|
| 121 |
+
|
| 122 |
+
def _get_cooldown_remaining_ms(token_data: Dict[str, Any], now_ms: Optional[int] = None) -> int:
|
| 123 |
+
"""Get remaining cooldown time (ms)."""
|
| 124 |
+
cooldown_until = token_data.get("cooldownUntil")
|
| 125 |
+
if not cooldown_until:
|
| 126 |
+
return 0
|
| 127 |
+
|
| 128 |
+
try:
|
| 129 |
+
now = now_ms if now_ms is not None else int(time.time() * 1000)
|
| 130 |
+
remaining = int(cooldown_until) - now
|
| 131 |
+
return remaining if remaining > 0 else 0
|
| 132 |
+
except (TypeError, ValueError):
|
| 133 |
+
return 0
|
| 134 |
+
|
| 135 |
+
|
| 136 |
+
def _is_token_in_cooldown(token_data: Dict[str, Any], now_ms: Optional[int] = None) -> bool:
    """True while the token's 429 cooldown window is still open."""
    return bool(_get_cooldown_remaining_ms(token_data, now_ms))
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
def calculate_token_stats(tokens: Dict[str, Any], token_type: str) -> Dict[str, int]:
    """Aggregate per-status counts for one token pool.

    `token_type` is "normal" or "super"; super tokens also track a heavy
    quota, and either quota hitting 0 counts as exhausted. "limited" is
    cooldown + exhausted.
    """
    now_ms = int(time.time() * 1000)
    expired = cooldown = exhausted = unused = active = 0

    for record in tokens.values():
        if record.get("status") == "expired":
            expired += 1
            continue

        if _is_token_in_cooldown(record, now_ms):
            cooldown += 1
            continue

        remaining = record.get("remainingQueries", -1)
        heavy = record.get("heavyremainingQueries", -1)

        # -1 means the quota has never been measured yet.
        if token_type == "normal":
            untouched = remaining == -1
            drained = remaining == 0
        else:
            untouched = remaining == -1 and heavy == -1
            drained = remaining == 0 or heavy == 0

        if untouched:
            unused += 1
        elif drained:
            exhausted += 1
        else:
            active += 1

    return {
        "total": len(tokens),
        "unused": unused,
        "limited": cooldown + exhausted,
        "cooldown": cooldown,
        "exhausted": exhausted,
        "expired": expired,
        "active": active,
    }
|
| 187 |
+
|
| 188 |
+
|
| 189 |
+
def verify_admin_session(authorization: Optional[str] = Header(None)) -> bool:
    """FastAPI dependency: require a live admin session via a Bearer token.

    Raises 401 for a missing/malformed header, an unknown session, or an
    expired one; expired sessions are evicted lazily on first use.
    """
    if not (authorization and authorization.startswith("Bearer ")):
        raise HTTPException(status_code=401, detail={"error": "Unauthorized", "code": "UNAUTHORIZED"})

    session_token = authorization[len("Bearer "):]
    expires_at = _sessions.get(session_token)

    if expires_at is None:
        raise HTTPException(status_code=401, detail={"error": "Invalid session", "code": "SESSION_INVALID"})

    if datetime.now() > expires_at:
        _sessions.pop(session_token, None)
        raise HTTPException(status_code=401, detail={"error": "Session expired", "code": "SESSION_EXPIRED"})

    return True
|
| 204 |
+
|
| 205 |
+
|
| 206 |
+
def get_token_status(token_data: Dict[str, Any], token_type: str) -> str:
    """Classify a token record as expired / cooldown / unused / exhausted / active.

    For "ssoSuper" tokens both the normal and heavy quotas matter; for all
    other types only `remainingQueries` is consulted (-1 = never measured).
    """
    if token_data.get("status") == "expired":
        return "expired"
    if _is_token_in_cooldown(token_data):
        return "cooldown"

    remaining = token_data.get("remainingQueries", -1)
    heavy = token_data.get("heavyremainingQueries", -1)

    if token_type == "ssoSuper":
        untouched = remaining == -1 and heavy == -1
        drained = remaining == 0 or heavy == 0
    else:
        untouched = remaining == -1
        drained = remaining == 0

    if untouched:
        return "unused"
    if drained:
        return "exhausted"
    return "active"
|
| 229 |
+
|
| 230 |
+
|
| 231 |
+
def _calculate_dir_size(directory: Path) -> int:
    """Sum the sizes (bytes) of the regular files directly inside *directory*.

    Non-files are skipped; stat failures (e.g. a file deleted between
    iterdir() and stat()) are logged and ignored, best-effort.
    """
    size = 0
    for entry in directory.iterdir():
        if not entry.is_file():
            continue
        try:
            size += entry.stat().st_size
        except Exception as e:
            logger.warning(f"[Admin] Unable to get file size: {entry.name}, {e}")
    return size
|
| 241 |
+
|
| 242 |
+
|
| 243 |
+
def _format_size(size_bytes: int) -> str:
|
| 244 |
+
"""Format file size"""
|
| 245 |
+
size_mb = size_bytes / BYTES_PER_MB
|
| 246 |
+
if size_mb < 1:
|
| 247 |
+
return f"{size_bytes / BYTES_PER_KB:.1f} KB"
|
| 248 |
+
return f"{size_mb:.1f} MB"
|
| 249 |
+
|
| 250 |
+
|
| 251 |
+
# === Page routes ===
|
| 252 |
+
|
| 253 |
+
@router.get("/login", response_class=HTMLResponse)
|
| 254 |
+
async def login_page():
|
| 255 |
+
"""Login page"""
|
| 256 |
+
login_html = STATIC_DIR / "login.html"
|
| 257 |
+
if login_html.exists():
|
| 258 |
+
return login_html.read_text(encoding="utf-8")
|
| 259 |
+
raise HTTPException(status_code=404, detail="Login page not found")
|
| 260 |
+
|
| 261 |
+
|
| 262 |
+
@router.get("/manage", response_class=HTMLResponse)
|
| 263 |
+
async def manage_page():
|
| 264 |
+
"""Admin page"""
|
| 265 |
+
admin_html = STATIC_DIR / "admin.html"
|
| 266 |
+
if admin_html.exists():
|
| 267 |
+
return admin_html.read_text(encoding="utf-8")
|
| 268 |
+
raise HTTPException(status_code=404, detail="Admin page not found")
|
| 269 |
+
|
| 270 |
+
|
| 271 |
+
# === API endpoints ===
|
| 272 |
+
|
| 273 |
+
@router.post("/api/login", response_model=LoginResponse)
|
| 274 |
+
async def admin_login(request: LoginRequest) -> LoginResponse:
|
| 275 |
+
"""Admin login"""
|
| 276 |
+
try:
|
| 277 |
+
logger.debug(f"[Admin] Login attempt: {request.username}")
|
| 278 |
+
|
| 279 |
+
expected_user = setting.global_config.get("admin_username", "")
|
| 280 |
+
expected_pass = setting.global_config.get("admin_password", "")
|
| 281 |
+
|
| 282 |
+
if request.username != expected_user or request.password != expected_pass:
|
| 283 |
+
logger.warning(f"[Admin] Login failed: {request.username}")
|
| 284 |
+
return LoginResponse(success=False, message="Invalid username or password")
|
| 285 |
+
|
| 286 |
+
session_token = secrets.token_urlsafe(32)
|
| 287 |
+
_sessions[session_token] = datetime.now() + timedelta(hours=SESSION_EXPIRE_HOURS)
|
| 288 |
+
|
| 289 |
+
logger.debug(f"[Admin] Login succeeded: {request.username}")
|
| 290 |
+
return LoginResponse(success=True, token=session_token, message="Login succeeded")
|
| 291 |
+
|
| 292 |
+
except Exception as e:
|
| 293 |
+
logger.error(f"[Admin] Login error: {e}")
|
| 294 |
+
raise HTTPException(status_code=500, detail={"error": f"Login failed: {e}", "code": "LOGIN_ERROR"})
|
| 295 |
+
|
| 296 |
+
|
| 297 |
+
@router.post("/api/logout")
|
| 298 |
+
async def admin_logout(_: bool = Depends(verify_admin_session), authorization: Optional[str] = Header(None)) -> Dict[str, Any]:
|
| 299 |
+
"""Admin logout"""
|
| 300 |
+
try:
|
| 301 |
+
if authorization and authorization.startswith("Bearer "):
|
| 302 |
+
token = authorization[7:]
|
| 303 |
+
if token in _sessions:
|
| 304 |
+
del _sessions[token]
|
| 305 |
+
logger.debug("[Admin] Logout succeeded")
|
| 306 |
+
return {"success": True, "message": "Logout succeeded"}
|
| 307 |
+
|
| 308 |
+
logger.warning("[Admin] Logout failed: invalid session")
|
| 309 |
+
return {"success": False, "message": "Invalid session"}
|
| 310 |
+
|
| 311 |
+
except Exception as e:
|
| 312 |
+
logger.error(f"[Admin] Logout error: {e}")
|
| 313 |
+
raise HTTPException(status_code=500, detail={"error": f"Logout failed: {e}", "code": "LOGOUT_ERROR"})
|
| 314 |
+
|
| 315 |
+
|
| 316 |
+
@router.get("/api/tokens", response_model=TokenListResponse)
|
| 317 |
+
async def list_tokens(_: bool = Depends(verify_admin_session)) -> TokenListResponse:
|
| 318 |
+
"""Get token list"""
|
| 319 |
+
try:
|
| 320 |
+
logger.debug("[Admin] Fetching token list")
|
| 321 |
+
|
| 322 |
+
all_tokens = token_manager.get_tokens()
|
| 323 |
+
token_list: List[TokenInfo] = []
|
| 324 |
+
now_ms = int(time.time() * 1000)
|
| 325 |
+
|
| 326 |
+
# Normal tokens
|
| 327 |
+
for token, data in all_tokens.get(TokenType.NORMAL.value, {}).items():
|
| 328 |
+
cooldown_remaining_ms = _get_cooldown_remaining_ms(data, now_ms)
|
| 329 |
+
cooldown_until = data.get("cooldownUntil") if cooldown_remaining_ms else None
|
| 330 |
+
limit_reason = "cooldown" if cooldown_remaining_ms else ""
|
| 331 |
+
if not limit_reason and data.get("remainingQueries", -1) == 0:
|
| 332 |
+
limit_reason = "exhausted"
|
| 333 |
+
token_list.append(TokenInfo(
|
| 334 |
+
token=token,
|
| 335 |
+
token_type="sso",
|
| 336 |
+
created_time=parse_created_time(data.get("createdTime")),
|
| 337 |
+
remaining_queries=data.get("remainingQueries", -1),
|
| 338 |
+
heavy_remaining_queries=data.get("heavyremainingQueries", -1),
|
| 339 |
+
status=get_token_status(data, "sso"),
|
| 340 |
+
tags=data.get("tags", []),
|
| 341 |
+
note=data.get("note", ""),
|
| 342 |
+
cooldown_until=cooldown_until,
|
| 343 |
+
cooldown_remaining=(cooldown_remaining_ms + 999) // 1000 if cooldown_remaining_ms else 0,
|
| 344 |
+
last_failure_time=data.get("lastFailureTime") or None,
|
| 345 |
+
last_failure_reason=data.get("lastFailureReason") or "",
|
| 346 |
+
limit_reason=limit_reason
|
| 347 |
+
))
|
| 348 |
+
|
| 349 |
+
# Super tokens
|
| 350 |
+
for token, data in all_tokens.get(TokenType.SUPER.value, {}).items():
|
| 351 |
+
cooldown_remaining_ms = _get_cooldown_remaining_ms(data, now_ms)
|
| 352 |
+
cooldown_until = data.get("cooldownUntil") if cooldown_remaining_ms else None
|
| 353 |
+
limit_reason = "cooldown" if cooldown_remaining_ms else ""
|
| 354 |
+
if not limit_reason and (data.get("remainingQueries", -1) == 0 or data.get("heavyremainingQueries", -1) == 0):
|
| 355 |
+
limit_reason = "exhausted"
|
| 356 |
+
token_list.append(TokenInfo(
|
| 357 |
+
token=token,
|
| 358 |
+
token_type="ssoSuper",
|
| 359 |
+
created_time=parse_created_time(data.get("createdTime")),
|
| 360 |
+
remaining_queries=data.get("remainingQueries", -1),
|
| 361 |
+
heavy_remaining_queries=data.get("heavyremainingQueries", -1),
|
| 362 |
+
status=get_token_status(data, "ssoSuper"),
|
| 363 |
+
tags=data.get("tags", []),
|
| 364 |
+
note=data.get("note", ""),
|
| 365 |
+
cooldown_until=cooldown_until,
|
| 366 |
+
cooldown_remaining=(cooldown_remaining_ms + 999) // 1000 if cooldown_remaining_ms else 0,
|
| 367 |
+
last_failure_time=data.get("lastFailureTime") or None,
|
| 368 |
+
last_failure_reason=data.get("lastFailureReason") or "",
|
| 369 |
+
limit_reason=limit_reason
|
| 370 |
+
))
|
| 371 |
+
|
| 372 |
+
logger.debug(f"[Admin] Token list retrieved: {len(token_list)} items")
|
| 373 |
+
return TokenListResponse(success=True, data=token_list, total=len(token_list))
|
| 374 |
+
|
| 375 |
+
except Exception as e:
|
| 376 |
+
logger.error(f"[Admin] Token list error: {e}")
|
| 377 |
+
raise HTTPException(status_code=500, detail={"error": f"Fetch failed: {e}", "code": "LIST_ERROR"})
|
| 378 |
+
|
| 379 |
+
|
| 380 |
+
@router.post("/api/tokens/add")
|
| 381 |
+
async def add_tokens(request: AddTokensRequest, _: bool = Depends(verify_admin_session)) -> Dict[str, Any]:
|
| 382 |
+
"""Batch add tokens"""
|
| 383 |
+
try:
|
| 384 |
+
logger.debug(f"[Admin] Adding tokens: {request.token_type}, {len(request.tokens)} items")
|
| 385 |
+
|
| 386 |
+
token_type = validate_token_type(request.token_type)
|
| 387 |
+
await token_manager.add_token(request.tokens, token_type)
|
| 388 |
+
|
| 389 |
+
logger.debug(f"[Admin] Tokens added: {len(request.tokens)} items")
|
| 390 |
+
return {"success": True, "message": f"Successfully added {len(request.tokens)} tokens", "count": len(request.tokens)}
|
| 391 |
+
|
| 392 |
+
except HTTPException:
|
| 393 |
+
raise
|
| 394 |
+
except Exception as e:
|
| 395 |
+
logger.error(f"[Admin] Token add error: {e}")
|
| 396 |
+
raise HTTPException(status_code=500, detail={"error": f"Add failed: {e}", "code": "ADD_ERROR"})
|
| 397 |
+
|
| 398 |
+
|
| 399 |
+
@router.post("/api/tokens/delete")
|
| 400 |
+
async def delete_tokens(request: DeleteTokensRequest, _: bool = Depends(verify_admin_session)) -> Dict[str, Any]:
|
| 401 |
+
"""Batch delete tokens"""
|
| 402 |
+
try:
|
| 403 |
+
logger.debug(f"[Admin] Deleting tokens: {request.token_type}, {len(request.tokens)} items")
|
| 404 |
+
|
| 405 |
+
token_type = validate_token_type(request.token_type)
|
| 406 |
+
await token_manager.delete_token(request.tokens, token_type)
|
| 407 |
+
|
| 408 |
+
logger.debug(f"[Admin] Tokens deleted: {len(request.tokens)} items")
|
| 409 |
+
return {"success": True, "message": f"Successfully deleted {len(request.tokens)} tokens", "count": len(request.tokens)}
|
| 410 |
+
|
| 411 |
+
except HTTPException:
|
| 412 |
+
raise
|
| 413 |
+
except Exception as e:
|
| 414 |
+
logger.error(f"[Admin] Token delete error: {e}")
|
| 415 |
+
raise HTTPException(status_code=500, detail={"error": f"Delete failed: {e}", "code": "DELETE_ERROR"})
|
| 416 |
+
|
| 417 |
+
|
| 418 |
+
@router.get("/api/settings")
|
| 419 |
+
async def get_settings(_: bool = Depends(verify_admin_session)) -> Dict[str, Any]:
|
| 420 |
+
"""Get settings"""
|
| 421 |
+
try:
|
| 422 |
+
logger.debug("[Admin] Fetching settings")
|
| 423 |
+
return {"success": True, "data": {"global": setting.global_config, "grok": setting.grok_config}}
|
| 424 |
+
except Exception as e:
|
| 425 |
+
logger.error(f"[Admin] Failed to fetch settings: {e}")
|
| 426 |
+
raise HTTPException(status_code=500, detail={"error": f"Fetch failed: {e}", "code": "GET_SETTINGS_ERROR"})
|
| 427 |
+
|
| 428 |
+
|
| 429 |
+
@router.post("/api/settings")
|
| 430 |
+
async def update_settings(request: UpdateSettingsRequest, _: bool = Depends(verify_admin_session)) -> Dict[str, Any]:
|
| 431 |
+
"""Update settings"""
|
| 432 |
+
try:
|
| 433 |
+
logger.debug("[Admin] Updating settings")
|
| 434 |
+
await setting.save(global_config=request.global_config, grok_config=request.grok_config)
|
| 435 |
+
logger.debug("[Admin] Settings updated")
|
| 436 |
+
return {"success": True, "message": "Settings updated"}
|
| 437 |
+
except Exception as e:
|
| 438 |
+
logger.error(f"[Admin] Failed to update settings: {e}")
|
| 439 |
+
raise HTTPException(status_code=500, detail={"error": f"Update failed: {e}", "code": "UPDATE_SETTINGS_ERROR"})
|
| 440 |
+
|
| 441 |
+
|
| 442 |
+
@router.get("/api/cache/size")
|
| 443 |
+
async def get_cache_size(_: bool = Depends(verify_admin_session)) -> Dict[str, Any]:
|
| 444 |
+
"""Get cache size"""
|
| 445 |
+
try:
|
| 446 |
+
logger.debug("[Admin] Fetching cache size")
|
| 447 |
+
|
| 448 |
+
image_size = _calculate_dir_size(IMAGE_CACHE_DIR) if IMAGE_CACHE_DIR.exists() else 0
|
| 449 |
+
video_size = _calculate_dir_size(VIDEO_CACHE_DIR) if VIDEO_CACHE_DIR.exists() else 0
|
| 450 |
+
total_size = image_size + video_size
|
| 451 |
+
|
| 452 |
+
logger.debug(f"[Admin] Cache size: images {_format_size(image_size)}, videos {_format_size(video_size)}")
|
| 453 |
+
|
| 454 |
+
return {
|
| 455 |
+
"success": True,
|
| 456 |
+
"data": {
|
| 457 |
+
"image_size": _format_size(image_size),
|
| 458 |
+
"video_size": _format_size(video_size),
|
| 459 |
+
"total_size": _format_size(total_size),
|
| 460 |
+
"image_size_bytes": image_size,
|
| 461 |
+
"video_size_bytes": video_size,
|
| 462 |
+
"total_size_bytes": total_size
|
| 463 |
+
}
|
| 464 |
+
}
|
| 465 |
+
|
| 466 |
+
except Exception as e:
|
| 467 |
+
logger.error(f"[Admin] Cache size error: {e}")
|
| 468 |
+
raise HTTPException(status_code=500, detail={"error": f"Fetch failed: {e}", "code": "CACHE_SIZE_ERROR"})
|
| 469 |
+
|
| 470 |
+
|
| 471 |
+
@router.get("/api/cache/list")
|
| 472 |
+
async def list_cache_files(
|
| 473 |
+
cache_type: str = Query("image", alias="type"),
|
| 474 |
+
limit: int = 50,
|
| 475 |
+
offset: int = 0,
|
| 476 |
+
_: bool = Depends(verify_admin_session)
|
| 477 |
+
) -> Dict[str, Any]:
|
| 478 |
+
"""List cached files for admin preview."""
|
| 479 |
+
try:
|
| 480 |
+
cache_type = cache_type.lower()
|
| 481 |
+
if cache_type not in ("image", "video"):
|
| 482 |
+
raise HTTPException(status_code=400, detail={"error": "Invalid cache type", "code": "INVALID_CACHE_TYPE"})
|
| 483 |
+
|
| 484 |
+
if limit < 1:
|
| 485 |
+
limit = 1
|
| 486 |
+
if limit > 200:
|
| 487 |
+
limit = 200
|
| 488 |
+
if offset < 0:
|
| 489 |
+
offset = 0
|
| 490 |
+
|
| 491 |
+
cache_dir = IMAGE_CACHE_DIR if cache_type == "image" else VIDEO_CACHE_DIR
|
| 492 |
+
if not cache_dir.exists():
|
| 493 |
+
return {"success": True, "data": {"total": 0, "items": [], "offset": offset, "limit": limit, "has_more": False}}
|
| 494 |
+
|
| 495 |
+
files = []
|
| 496 |
+
for file_path in cache_dir.iterdir():
|
| 497 |
+
if not file_path.is_file():
|
| 498 |
+
continue
|
| 499 |
+
try:
|
| 500 |
+
stat = file_path.stat()
|
| 501 |
+
except Exception as e:
|
| 502 |
+
logger.warning(f"[Admin] Skip cache file: {file_path.name}, {e}")
|
| 503 |
+
continue
|
| 504 |
+
files.append((file_path, stat.st_mtime, stat.st_size))
|
| 505 |
+
|
| 506 |
+
files.sort(key=lambda item: item[1], reverse=True)
|
| 507 |
+
total = len(files)
|
| 508 |
+
sliced = files[offset:offset + limit]
|
| 509 |
+
|
| 510 |
+
items = [
|
| 511 |
+
{
|
| 512 |
+
"name": file_path.name,
|
| 513 |
+
"size": _format_size(size),
|
| 514 |
+
"size_bytes": size,
|
| 515 |
+
"mtime": int(mtime * 1000),
|
| 516 |
+
"url": f"/images/{file_path.name}",
|
| 517 |
+
"type": cache_type
|
| 518 |
+
}
|
| 519 |
+
for file_path, mtime, size in sliced
|
| 520 |
+
]
|
| 521 |
+
|
| 522 |
+
return {
|
| 523 |
+
"success": True,
|
| 524 |
+
"data": {
|
| 525 |
+
"total": total,
|
| 526 |
+
"items": items,
|
| 527 |
+
"offset": offset,
|
| 528 |
+
"limit": limit,
|
| 529 |
+
"has_more": offset + limit < total
|
| 530 |
+
}
|
| 531 |
+
}
|
| 532 |
+
|
| 533 |
+
except HTTPException:
|
| 534 |
+
raise
|
| 535 |
+
except Exception as e:
|
| 536 |
+
logger.error(f"[Admin] Cache list error: {e}")
|
| 537 |
+
raise HTTPException(status_code=500, detail={"error": f"Fetch failed: {e}", "code": "CACHE_LIST_ERROR"})
|
| 538 |
+
|
| 539 |
+
|
| 540 |
+
@router.post("/api/cache/clear")
|
| 541 |
+
async def clear_cache(_: bool = Depends(verify_admin_session)) -> Dict[str, Any]:
|
| 542 |
+
"""Clear all cache"""
|
| 543 |
+
try:
|
| 544 |
+
logger.debug("[Admin] Clearing cache")
|
| 545 |
+
|
| 546 |
+
image_count = 0
|
| 547 |
+
video_count = 0
|
| 548 |
+
|
| 549 |
+
# Clear images
|
| 550 |
+
if IMAGE_CACHE_DIR.exists():
|
| 551 |
+
for file_path in IMAGE_CACHE_DIR.iterdir():
|
| 552 |
+
if file_path.is_file():
|
| 553 |
+
try:
|
| 554 |
+
file_path.unlink()
|
| 555 |
+
image_count += 1
|
| 556 |
+
except Exception as e:
|
| 557 |
+
logger.error(f"[Admin] Delete failed: {file_path.name}, {e}")
|
| 558 |
+
|
| 559 |
+
# Clear videos
|
| 560 |
+
if VIDEO_CACHE_DIR.exists():
|
| 561 |
+
for file_path in VIDEO_CACHE_DIR.iterdir():
|
| 562 |
+
if file_path.is_file():
|
| 563 |
+
try:
|
| 564 |
+
file_path.unlink()
|
| 565 |
+
video_count += 1
|
| 566 |
+
except Exception as e:
|
| 567 |
+
logger.error(f"[Admin] Delete failed: {file_path.name}, {e}")
|
| 568 |
+
|
| 569 |
+
total = image_count + video_count
|
| 570 |
+
logger.debug(f"[Admin] Cache cleared: images {image_count}, videos {video_count}")
|
| 571 |
+
|
| 572 |
+
return {
|
| 573 |
+
"success": True,
|
| 574 |
+
"message": f"Cache cleared, deleted {image_count} images and {video_count} videos, {total} files total",
|
| 575 |
+
"data": {"deleted_count": total, "image_count": image_count, "video_count": video_count}
|
| 576 |
+
}
|
| 577 |
+
|
| 578 |
+
except Exception as e:
|
| 579 |
+
logger.error(f"[Admin] Cache clear error: {e}")
|
| 580 |
+
raise HTTPException(status_code=500, detail={"error": f"Clear failed: {e}", "code": "CACHE_CLEAR_ERROR"})
|
| 581 |
+
|
| 582 |
+
|
| 583 |
+
@router.post("/api/cache/clear/images")
|
| 584 |
+
async def clear_image_cache(_: bool = Depends(verify_admin_session)) -> Dict[str, Any]:
|
| 585 |
+
"""Clear image cache"""
|
| 586 |
+
try:
|
| 587 |
+
logger.debug("[Admin] Clearing image cache")
|
| 588 |
+
|
| 589 |
+
count = 0
|
| 590 |
+
if IMAGE_CACHE_DIR.exists():
|
| 591 |
+
for file_path in IMAGE_CACHE_DIR.iterdir():
|
| 592 |
+
if file_path.is_file():
|
| 593 |
+
try:
|
| 594 |
+
file_path.unlink()
|
| 595 |
+
count += 1
|
| 596 |
+
except Exception as e:
|
| 597 |
+
logger.error(f"[Admin] Delete failed: {file_path.name}, {e}")
|
| 598 |
+
|
| 599 |
+
logger.debug(f"[Admin] Image cache cleared: {count} items")
|
| 600 |
+
return {"success": True, "message": f"Image cache cleared, deleted {count} files", "data": {"deleted_count": count, "type": "images"}}
|
| 601 |
+
|
| 602 |
+
except Exception as e:
|
| 603 |
+
logger.error(f"[Admin] Image cache clear error: {e}")
|
| 604 |
+
raise HTTPException(status_code=500, detail={"error": f"Clear failed: {e}", "code": "IMAGE_CACHE_CLEAR_ERROR"})
|
| 605 |
+
|
| 606 |
+
|
| 607 |
+
@router.post("/api/cache/clear/videos")
|
| 608 |
+
async def clear_video_cache(_: bool = Depends(verify_admin_session)) -> Dict[str, Any]:
|
| 609 |
+
"""Clear video cache"""
|
| 610 |
+
try:
|
| 611 |
+
logger.debug("[Admin] Clearing video cache")
|
| 612 |
+
|
| 613 |
+
count = 0
|
| 614 |
+
if VIDEO_CACHE_DIR.exists():
|
| 615 |
+
for file_path in VIDEO_CACHE_DIR.iterdir():
|
| 616 |
+
if file_path.is_file():
|
| 617 |
+
try:
|
| 618 |
+
file_path.unlink()
|
| 619 |
+
count += 1
|
| 620 |
+
except Exception as e:
|
| 621 |
+
logger.error(f"[Admin] Delete failed: {file_path.name}, {e}")
|
| 622 |
+
|
| 623 |
+
logger.debug(f"[Admin] Video cache cleared: {count} items")
|
| 624 |
+
return {"success": True, "message": f"Video cache cleared, deleted {count} files", "data": {"deleted_count": count, "type": "videos"}}
|
| 625 |
+
|
| 626 |
+
except Exception as e:
|
| 627 |
+
logger.error(f"[Admin] Video cache clear error: {e}")
|
| 628 |
+
raise HTTPException(status_code=500, detail={"error": f"Clear failed: {e}", "code": "VIDEO_CACHE_CLEAR_ERROR"})
|
| 629 |
+
|
| 630 |
+
|
| 631 |
+
@router.get("/api/stats")
|
| 632 |
+
async def get_stats(_: bool = Depends(verify_admin_session)) -> Dict[str, Any]:
|
| 633 |
+
"""Get stats"""
|
| 634 |
+
try:
|
| 635 |
+
logger.debug("[Admin] Fetching stats")
|
| 636 |
+
|
| 637 |
+
all_tokens = token_manager.get_tokens()
|
| 638 |
+
normal_stats = calculate_token_stats(all_tokens.get(TokenType.NORMAL.value, {}), "normal")
|
| 639 |
+
super_stats = calculate_token_stats(all_tokens.get(TokenType.SUPER.value, {}), "super")
|
| 640 |
+
total = normal_stats["total"] + super_stats["total"]
|
| 641 |
+
|
| 642 |
+
logger.debug(f"[Admin] Stats fetched - Normal tokens: {normal_stats['total']}, Super tokens: {super_stats['total']}, Total: {total}")
|
| 643 |
+
return {"success": True, "data": {"normal": normal_stats, "super": super_stats, "total": total}}
|
| 644 |
+
|
| 645 |
+
except Exception as e:
|
| 646 |
+
logger.error(f"[Admin] Stats error: {e}")
|
| 647 |
+
raise HTTPException(status_code=500, detail={"error": f"Fetch failed: {e}", "code": "STATS_ERROR"})
|
| 648 |
+
|
| 649 |
+
|
| 650 |
+
@router.get("/api/storage/mode")
|
| 651 |
+
async def get_storage_mode(_: bool = Depends(verify_admin_session)) -> Dict[str, Any]:
|
| 652 |
+
"""Get storage mode"""
|
| 653 |
+
try:
|
| 654 |
+
logger.debug("[Admin] Fetching storage mode")
|
| 655 |
+
import os
|
| 656 |
+
mode = os.getenv("STORAGE_MODE", "file").upper()
|
| 657 |
+
return {"success": True, "data": {"mode": mode}}
|
| 658 |
+
except Exception as e:
|
| 659 |
+
logger.error(f"[Admin] Storage mode error: {e}")
|
| 660 |
+
raise HTTPException(status_code=500, detail={"error": f"Fetch failed: {e}", "code": "STORAGE_MODE_ERROR"})
|
| 661 |
+
|
| 662 |
+
|
| 663 |
+
@router.post("/api/tokens/tags")
|
| 664 |
+
async def update_token_tags(request: UpdateTokenTagsRequest, _: bool = Depends(verify_admin_session)) -> Dict[str, Any]:
|
| 665 |
+
"""Update token tags"""
|
| 666 |
+
try:
|
| 667 |
+
logger.debug(f"[Admin] Updating token tags: {request.token[:10]}..., {request.tags}")
|
| 668 |
+
|
| 669 |
+
token_type = validate_token_type(request.token_type)
|
| 670 |
+
await token_manager.update_token_tags(request.token, token_type, request.tags)
|
| 671 |
+
|
| 672 |
+
logger.debug(f"[Admin] Token tags updated: {request.token[:10]}...")
|
| 673 |
+
return {"success": True, "message": "Tags updated successfully", "tags": request.tags}
|
| 674 |
+
|
| 675 |
+
except HTTPException:
|
| 676 |
+
raise
|
| 677 |
+
except Exception as e:
|
| 678 |
+
logger.error(f"[Admin] Token tag update error: {e}")
|
| 679 |
+
raise HTTPException(status_code=500, detail={"error": f"Update failed: {e}", "code": "UPDATE_TAGS_ERROR"})
|
| 680 |
+
|
| 681 |
+
|
| 682 |
+
@router.get("/api/tokens/tags/all")
|
| 683 |
+
async def get_all_tags(_: bool = Depends(verify_admin_session)) -> Dict[str, Any]:
|
| 684 |
+
"""Get all tags"""
|
| 685 |
+
try:
|
| 686 |
+
logger.debug("[Admin] Fetching all tags")
|
| 687 |
+
|
| 688 |
+
all_tokens = token_manager.get_tokens()
|
| 689 |
+
tags_set = set()
|
| 690 |
+
|
| 691 |
+
for token_type_data in all_tokens.values():
|
| 692 |
+
for token_data in token_type_data.values():
|
| 693 |
+
tags = token_data.get("tags", [])
|
| 694 |
+
if isinstance(tags, list):
|
| 695 |
+
tags_set.update(tags)
|
| 696 |
+
|
| 697 |
+
tags_list = sorted(list(tags_set))
|
| 698 |
+
logger.debug(f"[Admin] Tags fetched: {len(tags_list)} items")
|
| 699 |
+
return {"success": True, "data": tags_list}
|
| 700 |
+
|
| 701 |
+
except Exception as e:
|
| 702 |
+
logger.error(f"[Admin] Tag fetch error: {e}")
|
| 703 |
+
raise HTTPException(status_code=500, detail={"error": f"Fetch failed: {e}", "code": "GET_TAGS_ERROR"})
|
| 704 |
+
|
| 705 |
+
|
| 706 |
+
@router.post("/api/tokens/note")
|
| 707 |
+
async def update_token_note(request: UpdateTokenNoteRequest, _: bool = Depends(verify_admin_session)) -> Dict[str, Any]:
|
| 708 |
+
"""Update token note"""
|
| 709 |
+
try:
|
| 710 |
+
logger.debug(f"[Admin] Updating token note: {request.token[:10]}...")
|
| 711 |
+
|
| 712 |
+
token_type = validate_token_type(request.token_type)
|
| 713 |
+
await token_manager.update_token_note(request.token, token_type, request.note)
|
| 714 |
+
|
| 715 |
+
logger.debug(f"[Admin] Token note updated: {request.token[:10]}...")
|
| 716 |
+
return {"success": True, "message": "Note updated successfully", "note": request.note}
|
| 717 |
+
|
| 718 |
+
except HTTPException:
|
| 719 |
+
raise
|
| 720 |
+
except Exception as e:
|
| 721 |
+
logger.error(f"[Admin] Token note update error: {e}")
|
| 722 |
+
raise HTTPException(status_code=500, detail={"error": f"Update failed: {e}", "code": "UPDATE_NOTE_ERROR"})
|
| 723 |
+
|
| 724 |
+
|
| 725 |
+
@router.post("/api/tokens/test")
|
| 726 |
+
async def test_token(request: TestTokenRequest, _: bool = Depends(verify_admin_session)) -> Dict[str, Any]:
|
| 727 |
+
"""Test token availability"""
|
| 728 |
+
try:
|
| 729 |
+
logger.debug(f"[Admin] Testing token: {request.token[:10]}...")
|
| 730 |
+
|
| 731 |
+
token_type = validate_token_type(request.token_type)
|
| 732 |
+
auth_token = f"sso-rw={request.token};sso={request.token}"
|
| 733 |
+
|
| 734 |
+
result = await token_manager.check_limits(auth_token, "grok-4-fast")
|
| 735 |
+
|
| 736 |
+
if result:
|
| 737 |
+
logger.debug(f"[Admin] Token test succeeded: {request.token[:10]}...")
|
| 738 |
+
return {
|
| 739 |
+
"success": True,
|
| 740 |
+
"message": "Token valid",
|
| 741 |
+
"data": {
|
| 742 |
+
"valid": True,
|
| 743 |
+
"remaining_queries": result.get("remainingTokens", -1),
|
| 744 |
+
"limit": result.get("limit", -1)
|
| 745 |
+
}
|
| 746 |
+
}
|
| 747 |
+
else:
|
| 748 |
+
logger.warning(f"[Admin] Token test failed: {request.token[:10]}...")
|
| 749 |
+
|
| 750 |
+
all_tokens = token_manager.get_tokens()
|
| 751 |
+
token_data = all_tokens.get(token_type.value, {}).get(request.token)
|
| 752 |
+
|
| 753 |
+
if token_data:
|
| 754 |
+
if token_data.get("status") == "expired":
|
| 755 |
+
return {"success": False, "message": "Token expired", "data": {"valid": False, "error_type": "expired", "error_code": 401}}
|
| 756 |
+
cooldown_remaining_ms = _get_cooldown_remaining_ms(token_data)
|
| 757 |
+
if cooldown_remaining_ms:
|
| 758 |
+
return {
|
| 759 |
+
"success": False,
|
| 760 |
+
"message": "Token is in cooldown",
|
| 761 |
+
"data": {
|
| 762 |
+
"valid": False,
|
| 763 |
+
"error_type": "cooldown",
|
| 764 |
+
"error_code": 429,
|
| 765 |
+
"cooldown_remaining": (cooldown_remaining_ms + 999) // 1000
|
| 766 |
+
}
|
| 767 |
+
}
|
| 768 |
+
|
| 769 |
+
exhausted = token_data.get("remainingQueries") == 0
|
| 770 |
+
if token_type == TokenType.SUPER and token_data.get("heavyremainingQueries") == 0:
|
| 771 |
+
exhausted = True
|
| 772 |
+
if exhausted:
|
| 773 |
+
return {
|
| 774 |
+
"success": False,
|
| 775 |
+
"message": "Token quota exhausted",
|
| 776 |
+
"data": {"valid": False, "error_type": "exhausted", "error_code": "quota_exhausted"}
|
| 777 |
+
}
|
| 778 |
+
else:
|
| 779 |
+
return {"success": False, "message": "Server blocked or network error", "data": {"valid": False, "error_type": "blocked", "error_code": 403}}
|
| 780 |
+
else:
|
| 781 |
+
return {"success": False, "message": "Token data error", "data": {"valid": False, "error_type": "unknown", "error_code": "data_error"}}
|
| 782 |
+
|
| 783 |
+
except HTTPException:
|
| 784 |
+
raise
|
| 785 |
+
except Exception as e:
|
| 786 |
+
logger.error(f"[Admin] Token test error: {e}")
|
| 787 |
+
raise HTTPException(status_code=500, detail={"error": f"Test failed: {e}", "code": "TEST_TOKEN_ERROR"})
|
| 788 |
+
|
| 789 |
+
|
| 790 |
+
# Strong references to in-flight background refresh tasks. asyncio.create_task
# only keeps a weak reference; without this set the task may be garbage
# collected before it finishes.
_refresh_tasks: set = set()


@router.post("/api/tokens/refresh-all")
async def refresh_all_tokens(_: bool = Depends(verify_admin_session)) -> Dict[str, Any]:
    """Refresh remaining counts for all tokens (background).

    Starts a background task and returns immediately; the UI polls
    /api/tokens/refresh-progress for status. Returns a failure payload
    (not an HTTP error) when a refresh is already running.

    Raises:
        HTTPException: 500 when the task cannot be started.
    """
    import asyncio

    try:
        # Only one refresh may run at a time
        progress = token_manager.get_refresh_progress()
        if progress.get("running"):
            return {
                "success": False,
                "message": "Refresh task already running",
                "data": progress
            }

        # Start background refresh task; retain a reference so it is not
        # garbage-collected mid-execution, and drop it when done.
        logger.info("[Admin] Starting background refresh task")
        task = asyncio.create_task(token_manager.refresh_all_limits())
        _refresh_tasks.add(task)
        task.add_done_callback(_refresh_tasks.discard)

        # Return immediately so the UI can poll progress
        return {
            "success": True,
            "message": "Refresh task started",
            "data": {"started": True}
        }
    except Exception as e:
        logger.error(f"[Admin] Token refresh error: {e}")
        raise HTTPException(status_code=500, detail={"error": f"Refresh failed: {e}", "code": "REFRESH_ALL_ERROR"})
|
| 818 |
+
|
| 819 |
+
|
| 820 |
+
@router.get("/api/tokens/refresh-progress")
async def get_refresh_progress(_: bool = Depends(verify_admin_session)) -> Dict[str, Any]:
    """Report current token-refresh progress for UI polling."""
    try:
        return {"success": True, "data": token_manager.get_refresh_progress()}
    except Exception as exc:
        logger.error(f"[Admin] Refresh progress error: {exc}")
        raise HTTPException(status_code=500, detail={"error": f"Failed to get progress: {exc}"})
|
| 829 |
+
|
| 830 |
+
|
| 831 |
+
@router.get("/api/request-stats")
async def get_request_stats(_: bool = Depends(verify_admin_session)) -> Dict[str, Any]:
    """Return aggregated request statistics (last 24 hours and 7 days)."""
    try:
        data = request_stats.get_stats(hours=24, days=7)
        return {"success": True, "data": data}
    except Exception as exc:
        logger.error(f"[Admin] Request stats error: {exc}")
        raise HTTPException(status_code=500, detail={"error": f"Failed to get stats: {exc}"})
|
| 840 |
+
|
| 841 |
+
|
| 842 |
+
# === API Key Management ===
|
| 843 |
+
|
| 844 |
+
class AddKeyRequest(BaseModel):
    """Request body for creating a single API key."""
    name: str  # Human-readable label for the new key
|
| 846 |
+
|
| 847 |
+
|
| 848 |
+
class UpdateKeyNameRequest(BaseModel):
    """Request body for renaming an existing API key."""
    key: str  # Full key value identifying the record
    name: str  # New label for the key
|
| 851 |
+
|
| 852 |
+
|
| 853 |
+
class UpdateKeyStatusRequest(BaseModel):
    """Request body for enabling/disabling a single API key."""
    key: str  # Full key value identifying the record
    is_active: bool  # True to enable, False to disable
|
| 856 |
+
|
| 857 |
+
|
| 858 |
+
class BatchAddKeyRequest(BaseModel):
    """Request body for creating several API keys at once."""
    name_prefix: str  # Label prefix for the generated keys
    count: int  # Number of keys to create
|
| 861 |
+
|
| 862 |
+
|
| 863 |
+
class BatchDeleteKeyRequest(BaseModel):
    """Request body for deleting several API keys at once."""
    keys: List[str]  # Full key values to delete
|
| 865 |
+
|
| 866 |
+
|
| 867 |
+
class BatchUpdateKeyStatusRequest(BaseModel):
    """Request body for enabling/disabling several API keys at once."""
    keys: List[str]  # Full key values to update
    is_active: bool  # Target status applied to every listed key
|
| 870 |
+
|
| 871 |
+
|
| 872 |
+
@router.get("/api/keys")
async def list_keys(_: bool = Depends(verify_admin_session)) -> Dict[str, Any]:
    """Return all API keys with masked display values."""
    try:
        from app.services.api_keys import api_key_manager
        # Lazy initialization on first access
        if not api_key_manager._loaded:
            await api_key_manager.init()

        # Mask each key for display: keep only the first 6 and last 4 chars
        masked = [
            {**entry, "display_key": f"{entry['key'][:6]}...{entry['key'][-4:]}"}
            for entry in api_key_manager.get_all_keys()
        ]

        # Also report whether a global/default key is configured
        return {
            "success": True,
            "data": masked,
            "global_key_set": bool(setting.grok_config.get("api_key"))
        }
    except Exception as exc:
        logger.error(f"[Admin] Failed to get key list: {exc}")
        raise HTTPException(status_code=500, detail={"error": f"Fetch failed: {exc}"})
|
| 901 |
+
|
| 902 |
+
|
| 903 |
+
@router.post("/api/keys/add")
async def add_key(request: AddKeyRequest, _: bool = Depends(verify_admin_session)) -> Dict[str, Any]:
    """Create a single API key with the requested label."""
    try:
        from app.services.api_keys import api_key_manager
        created = await api_key_manager.add_key(request.name)
        return {"success": True, "data": created, "message": "Key created successfully"}
    except Exception as exc:
        logger.error(f"[Admin] Failed to add key: {exc}")
        raise HTTPException(status_code=500, detail={"error": f"Add failed: {exc}"})
|
| 913 |
+
|
| 914 |
+
|
| 915 |
+
@router.post("/api/keys/delete")
async def delete_key(request: Dict[str, str], _: bool = Depends(verify_admin_session)) -> Dict[str, Any]:
    """Delete an API key.

    Expects a JSON body of the form {"key": "<key>"}.

    Raises:
        HTTPException: 400 when the key is missing/empty (previously this
            was raised as ValueError inside the try block and surfaced as
            a misleading 500), 500 on backend failure.
    """
    # Validate outside the try block: a missing key is a client error,
    # not a server error.
    key = request.get("key")
    if not key:
        raise HTTPException(status_code=400, detail={"error": "Key cannot be empty"})

    try:
        from app.services.api_keys import api_key_manager
        if await api_key_manager.delete_key(key):
            return {"success": True, "message": "Key deleted successfully"}
        return {"success": False, "message": "Key not found"}
    except Exception as e:
        logger.error(f"[Admin] Failed to delete key: {e}")
        raise HTTPException(status_code=500, detail={"error": f"Delete failed: {e}"})
|
| 930 |
+
|
| 931 |
+
|
| 932 |
+
@router.post("/api/keys/status")
async def update_key_status(request: UpdateKeyStatusRequest, _: bool = Depends(verify_admin_session)) -> Dict[str, Any]:
    """Enable or disable a single API key."""
    try:
        from app.services.api_keys import api_key_manager
        updated = await api_key_manager.update_key_status(request.key, request.is_active)
        if not updated:
            return {"success": False, "message": "Key not found"}
        return {"success": True, "message": "Status updated successfully"}
    except Exception as exc:
        logger.error(f"[Admin] Failed to update key status: {exc}")
        raise HTTPException(status_code=500, detail={"error": f"Update failed: {exc}"})
|
| 943 |
+
|
| 944 |
+
|
| 945 |
+
@router.post("/api/keys/name")
async def update_key_name(request: UpdateKeyNameRequest, _: bool = Depends(verify_admin_session)) -> Dict[str, Any]:
    """Update the human-readable note attached to a key."""
    try:
        from app.services.api_keys import api_key_manager
        renamed = await api_key_manager.update_key_name(request.key, request.name)
        if not renamed:
            return {"success": False, "message": "Key not found"}
        return {"success": True, "message": "Note updated successfully"}
    except Exception as exc:
        logger.error(f"[Admin] Failed to update key note: {exc}")
        raise HTTPException(status_code=500, detail={"error": f"Update failed: {exc}"})
|
| 956 |
+
|
| 957 |
+
|
| 958 |
+
@router.post("/api/keys/batch-add")
async def batch_add_keys(request: BatchAddKeyRequest, _: bool = Depends(verify_admin_session)) -> Dict[str, Any]:
    """Create multiple API keys in a single call."""
    try:
        from app.services.api_keys import api_key_manager
        created = await api_key_manager.batch_add_keys(request.name_prefix, request.count)
        return {"success": True, "data": created, "message": f"Successfully created {len(created)} keys"}
    except Exception as exc:
        logger.error(f"[Admin] Batch add keys failed: {exc}")
        raise HTTPException(status_code=500, detail={"error": f"Batch add failed: {exc}"})
|
| 968 |
+
|
| 969 |
+
|
| 970 |
+
@router.post("/api/keys/batch-delete")
async def batch_delete_keys(request: BatchDeleteKeyRequest, _: bool = Depends(verify_admin_session)) -> Dict[str, Any]:
    """Delete multiple API keys in a single call."""
    try:
        from app.services.api_keys import api_key_manager
        removed = await api_key_manager.batch_delete_keys(request.keys)
        return {"success": True, "message": f"Successfully deleted {removed} keys"}
    except Exception as exc:
        logger.error(f"[Admin] Batch delete keys failed: {exc}")
        raise HTTPException(status_code=500, detail={"error": f"Batch delete failed: {exc}"})
|
| 980 |
+
|
| 981 |
+
|
| 982 |
+
@router.post("/api/keys/batch-status")
async def batch_update_key_status(request: BatchUpdateKeyStatusRequest, _: bool = Depends(verify_admin_session)) -> Dict[str, Any]:
    """Enable or disable multiple API keys in a single call."""
    try:
        from app.services.api_keys import api_key_manager
        changed = await api_key_manager.batch_update_keys_status(request.keys, request.is_active)
        return {"success": True, "message": f"Successfully updated {changed} keys"}
    except Exception as exc:
        logger.error(f"[Admin] Batch update key status failed: {exc}")
        raise HTTPException(status_code=500, detail={"error": f"Batch update failed: {exc}"})
|
| 992 |
+
|
| 993 |
+
|
| 994 |
+
# === Audit logs ===
|
| 995 |
+
|
| 996 |
+
@router.get("/api/logs")
async def get_logs(limit: int = 1000, _: bool = Depends(verify_admin_session)) -> Dict[str, Any]:
    """Return up to `limit` recent request log entries."""
    try:
        from app.services.request_logger import request_logger
        entries = await request_logger.get_logs(limit)
        return {"success": True, "data": entries}
    except Exception as exc:
        logger.error(f"[Admin] Failed to get logs: {exc}")
        raise HTTPException(status_code=500, detail={"error": f"Fetch failed: {exc}"})
|
| 1006 |
+
|
| 1007 |
+
@router.post("/api/logs/clear")
async def clear_logs(_: bool = Depends(verify_admin_session)) -> Dict[str, Any]:
    """Remove all stored request log entries."""
    try:
        from app.services.request_logger import request_logger
        await request_logger.clear_logs()
        return {"success": True, "message": "Logs cleared"}
    except Exception as exc:
        logger.error(f"[Admin] Failed to clear logs: {exc}")
        raise HTTPException(status_code=500, detail={"error": f"Clear failed: {exc}"})
|
app/api/v1/chat.py
ADDED
|
@@ -0,0 +1,107 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Chat API routes - OpenAI-compatible chat endpoints"""
|
| 2 |
+
|
| 3 |
+
import time
|
| 4 |
+
from fastapi import APIRouter, Depends, HTTPException, Request
|
| 5 |
+
from typing import Optional, Dict, Any
|
| 6 |
+
from fastapi.responses import StreamingResponse
|
| 7 |
+
|
| 8 |
+
from app.core.auth import auth_manager
|
| 9 |
+
from app.core.exception import GrokApiException
|
| 10 |
+
from app.core.logger import logger
|
| 11 |
+
from app.services.grok.client import GrokClient
|
| 12 |
+
from app.models.openai_schema import OpenAIChatRequest
|
| 13 |
+
from app.services.request_stats import request_stats
|
| 14 |
+
from app.services.request_logger import request_logger
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
# Router for OpenAI-compatible chat endpoints, mounted under /chat
router = APIRouter(prefix="/chat", tags=["chat"])
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
@router.post("/completions", response_model=None)
async def chat_completions(
    request: Request,
    body: OpenAIChatRequest,
    auth_info: Dict[str, Any] = Depends(auth_manager.verify)
):
    """Create chat completions (streaming and non-streaming).

    Delegates to GrokClient, records request statistics, and writes an
    audit log entry for both success and failure paths. Streaming responses
    log on stream completion via a wrapper generator.
    """
    started_at = time.time()
    model = body.model
    client_ip = request.client.host
    key_name = auth_info.get("name", "Unknown")

    status_code = 200
    error_msg = ""

    try:
        logger.info(f"[Chat] Received chat request: {key_name} @ {client_ip}")

        # Delegate to the Grok client (returns an async iterator when streaming)
        result = await GrokClient.openai_to_grok(body.model_dump())

        # Count the request as successful once the upstream call was issued
        await request_stats.record_request(model, success=True)

        if body.stream:
            # Wrap the upstream iterator so completion is always logged,
            # even if the client disconnects mid-stream.
            async def stream_wrapper():
                try:
                    async for chunk in result:
                        yield chunk
                finally:
                    elapsed = time.time() - started_at
                    await request_logger.add_log(client_ip, model, elapsed, 200, key_name)

            return StreamingResponse(
                content=stream_wrapper(),
                media_type="text/event-stream",
                headers={
                    "Cache-Control": "no-cache",
                    "Connection": "keep-alive",
                    "X-Accel-Buffering": "no"
                }
            )

        # Non-streaming: log the completed request, then return the payload
        await request_logger.add_log(client_ip, model, time.time() - started_at, 200, key_name)
        return result

    except GrokApiException as e:
        status_code = e.status_code or 500
        error_msg = str(e)
        await request_stats.record_request(model, success=False)
        logger.error(f"[Chat] Grok API error: {e} - Details: {e.details}")

        await request_logger.add_log(client_ip, model, time.time() - started_at, status_code, key_name, error=error_msg)

        raise HTTPException(
            status_code=status_code,
            detail={
                "error": {
                    "message": error_msg,
                    "type": e.error_code or "grok_api_error",
                    "code": e.error_code or "unknown"
                }
            }
        )
    except Exception as e:
        status_code = 500
        error_msg = str(e)
        await request_stats.record_request(model, success=False)
        logger.error(f"[Chat] Processing failed: {e}")

        await request_logger.add_log(client_ip, model, time.time() - started_at, status_code, key_name, error=error_msg)

        # Do not leak internal error details to the client
        raise HTTPException(
            status_code=500,
            detail={
                "error": {
                    "message": "Internal server error",
                    "type": "internal_error",
                    "code": "internal_server_error"
                }
            }
        )
|
app/api/v1/images.py
ADDED
|
@@ -0,0 +1,53 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Media API - serve cached images and video files"""
|
| 2 |
+
|
| 3 |
+
from fastapi import APIRouter, HTTPException
|
| 4 |
+
from fastapi.responses import FileResponse
|
| 5 |
+
|
| 6 |
+
from app.core.logger import logger
|
| 7 |
+
from app.services.grok.cache import image_cache_service, video_cache_service
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
# Router for cached media (images/videos); mounted by the app elsewhere
router = APIRouter()
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
@router.get("/images/{img_path:path}")
async def get_image(img_path: str):
    """Serve a cached image or video.

    Args:
        img_path: File path (format: users-xxx-generated-xxx-image.jpg)
    """
    try:
        # Reverse the URL-safe encoding: hyphens stand in for slashes
        source_path = "/" + img_path.replace('-', '/')

        # Route to the proper cache based on the file extension
        if source_path.lower().endswith(('.mp4', '.webm', '.mov', '.avi')):
            cached = video_cache_service.get_cached(source_path)
            content_type = "video/mp4"
        else:
            cached = image_cache_service.get_cached(source_path)
            content_type = "image/jpeg"

        if cached and cached.exists():
            logger.debug(f"[MediaAPI] Returning cached file: {cached}")
            return FileResponse(
                path=str(cached),
                media_type=content_type,
                headers={
                    "Cache-Control": "public, max-age=86400",
                    "Access-Control-Allow-Origin": "*"
                }
            )

        # Not in cache (or cache entry vanished on disk)
        logger.warning(f"[MediaAPI] Not found: {source_path}")
        raise HTTPException(status_code=404, detail="File not found")

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"[MediaAPI] Failed to fetch: {e}")
        raise HTTPException(status_code=500, detail=str(e))
|
app/api/v1/models.py
ADDED
|
@@ -0,0 +1,114 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Models API - OpenAI-compatible model list endpoints"""
|
| 2 |
+
|
| 3 |
+
import time
|
| 4 |
+
from typing import Dict, Any, List, Optional
|
| 5 |
+
from fastapi import APIRouter, HTTPException, Depends
|
| 6 |
+
|
| 7 |
+
from app.models.grok_models import Models
|
| 8 |
+
from app.core.auth import auth_manager
|
| 9 |
+
from app.core.logger import logger
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
# Router for OpenAI-compatible /models endpoints
router = APIRouter(tags=["models"])
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
@router.get("/models")
async def list_models(_: Optional[str] = Depends(auth_manager.verify)) -> Dict[str, Any]:
    """Return the available model list in OpenAI's list format."""
    try:
        logger.debug("[Models] Requesting model list")

        created = int(time.time())

        def build_entry(mid: str) -> Dict[str, Any]:
            # Assemble an OpenAI-style model record from the model's config
            cfg = Models.get_model_info(mid)
            return {
                "id": mid,
                "object": "model",
                "created": created,
                "owned_by": "x-ai",
                "display_name": cfg.get("display_name", mid),
                "description": cfg.get("description", ""),
                "raw_model_path": cfg.get("raw_model_path", f"xai/{mid}"),
                "default_temperature": cfg.get("default_temperature", 1.0),
                "default_max_output_tokens": cfg.get("default_max_output_tokens", 8192),
                "supported_max_output_tokens": cfg.get("supported_max_output_tokens", 131072),
                "default_top_p": cfg.get("default_top_p", 0.95)
            }

        model_data = [build_entry(member.value) for member in Models]

        logger.debug(f"[Models] Returned {len(model_data)} models")
        return {"object": "list", "data": model_data}

    except Exception as e:
        logger.error(f"[Models] Failed to get list: {e}")
        raise HTTPException(
            status_code=500,
            detail={
                "error": {
                    "message": f"Failed to retrieve models: {e}",
                    "type": "internal_error",
                    "code": "model_list_error"
                }
            }
        )
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
@router.get("/models/{model_id}")
async def get_model(model_id: str, _: Optional[str] = Depends(auth_manager.verify)) -> Dict[str, Any]:
    """Return a single model's info, or an OpenAI-style 404 if unknown."""
    try:
        logger.debug(f"[Models] Requesting model: {model_id}")

        # Reject unknown model ids with an OpenAI-compatible 404 payload
        if not Models.is_valid_model(model_id):
            logger.warning(f"[Models] Model not found: {model_id}")
            raise HTTPException(
                status_code=404,
                detail={
                    "error": {
                        "message": f"Model '{model_id}' not found",
                        "type": "invalid_request_error",
                        "code": "model_not_found"
                    }
                }
            )

        cfg = Models.get_model_info(model_id)
        payload = {
            "id": model_id,
            "object": "model",
            "created": int(time.time()),
            "owned_by": "x-ai",
            "display_name": cfg.get("display_name", model_id),
            "description": cfg.get("description", ""),
            "raw_model_path": cfg.get("raw_model_path", f"xai/{model_id}"),
            "default_temperature": cfg.get("default_temperature", 1.0),
            "default_max_output_tokens": cfg.get("default_max_output_tokens", 8192),
            "supported_max_output_tokens": cfg.get("supported_max_output_tokens", 131072),
            "default_top_p": cfg.get("default_top_p", 0.95)
        }

        logger.debug(f"[Models] Returned model: {model_id}")
        return payload

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"[Models] Failed to get model: {e}")
        raise HTTPException(
            status_code=500,
            detail={
                "error": {
                    "message": f"Failed to retrieve model: {e}",
                    "type": "internal_error",
                    "code": "model_retrieve_error"
                }
            }
        )
|
app/core/auth.py
ADDED
|
@@ -0,0 +1,66 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Authentication module - API token verification"""
|
| 2 |
+
|
| 3 |
+
from typing import Optional, Dict
|
| 4 |
+
from fastapi import Depends, HTTPException
|
| 5 |
+
from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
|
| 6 |
+
|
| 7 |
+
from app.core.config import setting
|
| 8 |
+
from app.core.logger import logger
|
| 9 |
+
from app.services.api_keys import api_key_manager
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
# Bearer security scheme
|
| 13 |
+
# auto_error=False: a missing Authorization header is handled manually in
# AuthManager.verify (to support the no-key development mode)
security = HTTPBearer(auto_error=False)
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
def _build_error(message: str, code: str = "invalid_token") -> dict:
|
| 17 |
+
"""Build authentication error"""
|
| 18 |
+
return {
|
| 19 |
+
"error": {
|
| 20 |
+
"message": message,
|
| 21 |
+
"type": "authentication_error",
|
| 22 |
+
"code": code
|
| 23 |
+
}
|
| 24 |
+
}
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
class AuthManager:
    """Authentication manager - verify API tokens"""

    @staticmethod
    async def verify(credentials: Optional[HTTPAuthorizationCredentials] = Depends(security)) -> Dict:
        """Verify the bearer token and return its key info.

        Anonymous access is allowed only when neither a global key nor any
        managed keys are configured (development mode).
        """
        api_key = setting.grok_config.get("api_key")

        # Lazily initialize the key manager on first use
        if not hasattr(api_key_manager, '_keys'):
            await api_key_manager.init()

        if not credentials:
            # Development mode: nothing configured -> allow anonymous access
            if not api_key and not api_key_manager.get_all_keys():
                logger.debug("[Auth] API_KEY not set, skipping verification")
                return {"key": None, "name": "Anonymous"}

            raise HTTPException(
                status_code=401,
                detail=_build_error("Missing authentication token", "missing_token")
            )

        token = credentials.credentials

        # Validate against the managed key set (supports multiple keys)
        key_info = api_key_manager.validate_key(token)
        if key_info:
            return key_info

        raise HTTPException(
            status_code=401,
            detail=_build_error(f"Invalid token, length: {len(token)}", "invalid_token")
        )


# Global instance
auth_manager = AuthManager()
|
app/core/config.py
ADDED
|
@@ -0,0 +1,243 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Config manager - manage app config read/write"""
|
| 2 |
+
import os
|
| 3 |
+
import toml
|
| 4 |
+
from pathlib import Path
|
| 5 |
+
from typing import Dict, Any, Optional, Literal
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
# Default config
|
| 9 |
+
# Defaults for the [grok] section of setting.toml.
DEFAULT_GROK = {
    "api_key": "",  # Global API key; "" means unset
    "proxy_url": "",  # Outbound proxy for Grok requests (normalized on load)
    "proxy_pool_url": "",  # URL serving proxies for the rotating pool
    "proxy_pool_interval": 300,  # presumably seconds between pool refreshes -- verify against proxy_pool module
    "cache_proxy_url": "",  # Proxy used for cache downloads (normalized on load)
    "cf_clearance": "",  # Cloudflare clearance cookie; "cf_clearance=" prefix added on load
    "x_statsig_id": "ZTpUeXBlRXJyb3I6IENhbm5vdCByZWFkIHByb3BlcnRpZXMgb2YgdW5kZWZpbmVkIChyZWFkaW5nICdjaGlsZE5vZGVzJyk=",  # Static fallback statsig id
    "dynamic_statsig": False,  # When True, presumably generate statsig ids dynamically -- verify against statsig module
    "filtered_tags": "xaiartifact,xai:tool_usage_card",  # Comma-separated tags stripped from responses
    "show_thinking": True,  # Include model "thinking" output in responses
    "temporary": False,  # NOTE(review): likely toggles temporary conversations -- confirm
    "max_upload_concurrency": 20,
    "max_request_concurrency": 100,
    "stream_first_response_timeout": 30,  # Seconds to wait for the first streamed chunk
    "stream_chunk_timeout": 120,  # Seconds allowed between streamed chunks
    "stream_total_timeout": 600,  # Overall streaming deadline in seconds
    "retry_status_codes": [401, 429],  # Retryable HTTP status codes
}
|
| 28 |
+
|
| 29 |
+
# Defaults for the [global] section of setting.toml.
DEFAULT_GLOBAL = {
    "base_url": "https://tejmar-grok2api-private.hf.space",  # Public base URL; overridden by the BASE_URL env var
    "log_level": "INFO",
    "image_mode": "url",  # How generated images are returned -- "url" here; other modes defined elsewhere
    "admin_password": "!!tejmar",  # SECURITY NOTE(review): hard-coded default admin credential; must be overridden in deployment
    "admin_username": "admin",
    "image_cache_max_size_mb": 512,
    "video_cache_max_size_mb": 1024,
    "image_download_timeout": 30,  # Seconds
    "image_download_max_size_mb": 20,
    "max_upload_concurrency": 20,  # Max concurrent uploads
    "max_request_concurrency": 50,  # Max concurrent requests
    "batch_save_interval": 1.0,  # Batch save interval (seconds)
    "batch_save_threshold": 10  # Change count threshold to trigger batch save
}
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
class ConfigManager:
|
| 47 |
+
"""Config manager"""
|
| 48 |
+
|
| 49 |
+
def __init__(self) -> None:
|
| 50 |
+
data_dir_env = os.getenv("DATA_DIR")
|
| 51 |
+
if data_dir_env:
|
| 52 |
+
data_dir = Path(data_dir_env)
|
| 53 |
+
elif Path("/data").exists():
|
| 54 |
+
data_dir = Path("/data")
|
| 55 |
+
else:
|
| 56 |
+
data_dir = Path(__file__).parents[2] / "data"
|
| 57 |
+
self.config_path: Path = data_dir / "setting.toml"
|
| 58 |
+
self._storage: Optional[Any] = None
|
| 59 |
+
self.global_config: Dict[str, Any] = {}
|
| 60 |
+
self.grok_config: Dict[str, Any] = {}
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
    def _ensure_exists(self) -> None:
        """Ensure config exists"""
        # On first run, create the parent directory and write a default config
        if not self.config_path.exists():
            self.config_path.parent.mkdir(parents=True, exist_ok=True)
            self._create_default()
|
| 68 |
+
|
| 69 |
+
def _create_default(self) -> None:
|
| 70 |
+
default = {"grok": DEFAULT_GROK.copy(), "global": DEFAULT_GLOBAL.copy()}
|
| 71 |
+
# If provided, use runtime base url
|
| 72 |
+
base_url = os.getenv("BASE_URL")
|
| 73 |
+
if base_url:
|
| 74 |
+
default["global"]["base_url"] = base_url
|
| 75 |
+
with open(self.config_path, "w", encoding="utf-8") as f:
|
| 76 |
+
toml.dump(default, f)
|
| 77 |
+
|
| 78 |
+
def _normalize_proxy(self, proxy: str) -> str:
|
| 79 |
+
"""Normalize proxy URL (sock5/socks5 -> socks5h://)"""
|
| 80 |
+
if not proxy:
|
| 81 |
+
return proxy
|
| 82 |
+
|
| 83 |
+
proxy = proxy.strip()
|
| 84 |
+
if proxy.startswith("sock5h://"):
|
| 85 |
+
proxy = proxy.replace("sock5h://", "socks5h://", 1)
|
| 86 |
+
if proxy.startswith("sock5://"):
|
| 87 |
+
proxy = proxy.replace("sock5://", "socks5://", 1)
|
| 88 |
+
if proxy.startswith("socks5://"):
|
| 89 |
+
return proxy.replace("socks5://", "socks5h://", 1)
|
| 90 |
+
return proxy
|
| 91 |
+
|
| 92 |
+
def _normalize_cf(self, cf: str) -> str:
|
| 93 |
+
"""Normalize CF clearance (auto prefix)"""
|
| 94 |
+
if cf and not cf.startswith("cf_clearance="):
|
| 95 |
+
return f"cf_clearance={cf}"
|
| 96 |
+
return cf
|
| 97 |
+
|
| 98 |
+
def set_storage(self, storage: Any) -> None:
|
| 99 |
+
"""Set storage instance"""
|
| 100 |
+
self._storage = storage
|
| 101 |
+
config_file = getattr(storage, "config_file", None)
|
| 102 |
+
data_dir = getattr(storage, "data_dir", None)
|
| 103 |
+
if config_file:
|
| 104 |
+
self.config_path = Path(config_file)
|
| 105 |
+
elif data_dir:
|
| 106 |
+
self.config_path = Path(data_dir) / "setting.toml"
|
| 107 |
+
|
| 108 |
+
@staticmethod
|
| 109 |
+
def _merge_defaults(config: Dict[str, Any], defaults: Dict[str, Any]) -> Dict[str, Any]:
|
| 110 |
+
"""Merge stored config with defaults."""
|
| 111 |
+
merged = defaults.copy()
|
| 112 |
+
if config:
|
| 113 |
+
merged.update(config)
|
| 114 |
+
return merged
|
| 115 |
+
|
| 116 |
+
def load(self, section: Literal["global", "grok"]) -> Dict[str, Any]:
|
| 117 |
+
"""Load config section"""
|
| 118 |
+
try:
|
| 119 |
+
with open(self.config_path, "r", encoding="utf-8") as f:
|
| 120 |
+
config = toml.load(f)[section]
|
| 121 |
+
|
| 122 |
+
# Normalize Grok config
|
| 123 |
+
if section == "grok":
|
| 124 |
+
if "proxy_url" in config:
|
| 125 |
+
config["proxy_url"] = self._normalize_proxy(config["proxy_url"])
|
| 126 |
+
if "cache_proxy_url" in config:
|
| 127 |
+
config["cache_proxy_url"] = self._normalize_proxy(config["cache_proxy_url"])
|
| 128 |
+
if "cf_clearance" in config:
|
| 129 |
+
config["cf_clearance"] = self._normalize_cf(config["cf_clearance"])
|
| 130 |
+
|
| 131 |
+
return config
|
| 132 |
+
except Exception as e:
|
| 133 |
+
raise Exception(f"[Setting] Failed to load config: {e}") from e
|
| 134 |
+
|
| 135 |
+
async def reload(self) -> None:
|
| 136 |
+
if self._storage:
|
| 137 |
+
config = await self._storage.load_config()
|
| 138 |
+
self.global_config = self._merge_defaults(config.get("global", {}), DEFAULT_GLOBAL)
|
| 139 |
+
self.grok_config = self._merge_defaults(config.get("grok", {}), DEFAULT_GROK)
|
| 140 |
+
else:
|
| 141 |
+
self._ensure_exists()
|
| 142 |
+
self.global_config = self._merge_defaults(self.load("global"), DEFAULT_GLOBAL)
|
| 143 |
+
self.grok_config = self._merge_defaults(self.load("grok"), DEFAULT_GROK)
|
| 144 |
+
# Always enforce BASE_URL from environment if provided
|
| 145 |
+
base_url = os.getenv("BASE_URL")
|
| 146 |
+
if base_url:
|
| 147 |
+
self.global_config["base_url"] = base_url
|
| 148 |
+
|
| 149 |
+
async def _save_file(self, updates: Dict[str, Dict[str, Any]]) -> None:
|
| 150 |
+
"""Save to file"""
|
| 151 |
+
import aiofiles
|
| 152 |
+
|
| 153 |
+
async with aiofiles.open(self.config_path, "r", encoding="utf-8") as f:
|
| 154 |
+
config = toml.loads(await f.read())
|
| 155 |
+
|
| 156 |
+
config.setdefault("global", {})
|
| 157 |
+
config.setdefault("grok", {})
|
| 158 |
+
|
| 159 |
+
for section, data in updates.items():
|
| 160 |
+
if section in config:
|
| 161 |
+
config[section].update(data)
|
| 162 |
+
|
| 163 |
+
async with aiofiles.open(self.config_path, "w", encoding="utf-8") as f:
|
| 164 |
+
await f.write(toml.dumps(config))
|
| 165 |
+
|
| 166 |
+
async def _save_storage(self, updates: Dict[str, Dict[str, Any]]) -> None:
|
| 167 |
+
"""Save to storage"""
|
| 168 |
+
config = await self._storage.load_config()
|
| 169 |
+
|
| 170 |
+
config.setdefault("global", {})
|
| 171 |
+
config.setdefault("grok", {})
|
| 172 |
+
|
| 173 |
+
for section, data in updates.items():
|
| 174 |
+
if section in config:
|
| 175 |
+
config[section].update(data)
|
| 176 |
+
|
| 177 |
+
await self._storage.save_config(config)
|
| 178 |
+
|
| 179 |
+
def _prepare_grok(self, grok: Dict[str, Any]) -> Dict[str, Any]:
|
| 180 |
+
"""Prepare Grok config (remove prefix)"""
|
| 181 |
+
processed = grok.copy()
|
| 182 |
+
if "cf_clearance" in processed:
|
| 183 |
+
cf = processed["cf_clearance"]
|
| 184 |
+
if cf and cf.startswith("cf_clearance="):
|
| 185 |
+
processed["cf_clearance"] = cf.replace("cf_clearance=", "", 1)
|
| 186 |
+
return processed
|
| 187 |
+
|
| 188 |
+
async def save(self, global_config: Optional[Dict[str, Any]] = None, grok_config: Optional[Dict[str, Any]] = None) -> None:
|
| 189 |
+
"""Save config"""
|
| 190 |
+
updates = {}
|
| 191 |
+
|
| 192 |
+
if global_config:
|
| 193 |
+
updates["global"] = global_config
|
| 194 |
+
if grok_config:
|
| 195 |
+
updates["grok"] = self._prepare_grok(grok_config)
|
| 196 |
+
|
| 197 |
+
# Choose storage backend
|
| 198 |
+
if self._storage:
|
| 199 |
+
await self._save_storage(updates)
|
| 200 |
+
else:
|
| 201 |
+
await self._save_file(updates)
|
| 202 |
+
|
| 203 |
+
await self.reload()
|
| 204 |
+
|
| 205 |
+
async def get_proxy_async(self, proxy_type: Literal["service", "cache"] = "service") -> str:
|
| 206 |
+
"""Async get proxy URL (supports proxy pool)
|
| 207 |
+
|
| 208 |
+
Args:
|
| 209 |
+
proxy_type: Proxy type
|
| 210 |
+
- service: Service proxy (client/upload)
|
| 211 |
+
- cache: Cache proxy (cache)
|
| 212 |
+
"""
|
| 213 |
+
from app.core.proxy_pool import proxy_pool
|
| 214 |
+
|
| 215 |
+
if proxy_type == "cache":
|
| 216 |
+
cache_proxy = self.grok_config.get("cache_proxy_url", "")
|
| 217 |
+
if cache_proxy:
|
| 218 |
+
return cache_proxy
|
| 219 |
+
|
| 220 |
+
# Get from proxy pool
|
| 221 |
+
return await proxy_pool.get_proxy() or ""
|
| 222 |
+
|
| 223 |
+
def get_proxy(self, proxy_type: Literal["service", "cache"] = "service") -> str:
|
| 224 |
+
"""Get proxy URL (sync, for backward compatibility)
|
| 225 |
+
|
| 226 |
+
Args:
|
| 227 |
+
proxy_type: Proxy type
|
| 228 |
+
- service: Service proxy (client/upload)
|
| 229 |
+
- cache: Cache proxy (cache)
|
| 230 |
+
"""
|
| 231 |
+
from app.core.proxy_pool import proxy_pool
|
| 232 |
+
|
| 233 |
+
if proxy_type == "cache":
|
| 234 |
+
cache_proxy = self.grok_config.get("cache_proxy_url", "")
|
| 235 |
+
if cache_proxy:
|
| 236 |
+
return cache_proxy
|
| 237 |
+
|
| 238 |
+
# Return current proxy (if proxy pool, return last fetched)
|
| 239 |
+
return proxy_pool.get_current_proxy() or self.grok_config.get("proxy_url", "")
|
| 240 |
+
|
| 241 |
+
|
| 242 |
+
# Global singleton config manager; other modules import `setting` from here.
setting = ConfigManager()
|
app/core/exception.py
ADDED
|
@@ -0,0 +1,119 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Exception handlers - OpenAI-compatible error responses"""
|
| 2 |
+
|
| 3 |
+
from fastapi import Request, status
|
| 4 |
+
from fastapi.responses import JSONResponse
|
| 5 |
+
from fastapi.exceptions import RequestValidationError
|
| 6 |
+
from starlette.exceptions import HTTPException as StarletteHTTPException
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
# Maps HTTP status code -> (OpenAI error type, default human-readable message).
HTTP_ERROR_MAP = {
    400: ("invalid_request_error", "Invalid request format or missing required parameters"),
    401: ("invalid_request_error", "Token authentication failed"),
    403: ("permission_error", "No permission to access this resource"),
    404: ("invalid_request_error", "Requested resource not found"),
    429: ("rate_limit_error", "Rate limit exceeded, please try again later"),
    500: ("api_error", "Internal server error"),
    503: ("api_error", "Service temporarily unavailable"),
}

# Maps internal Grok error codes to the HTTP status returned to clients.
GROK_STATUS_MAP = {
    "NO_AUTH_TOKEN": status.HTTP_401_UNAUTHORIZED,
    "INVALID_TOKEN": status.HTTP_401_UNAUTHORIZED,
    "HTTP_ERROR": status.HTTP_502_BAD_GATEWAY,
    "NETWORK_ERROR": status.HTTP_503_SERVICE_UNAVAILABLE,
    "JSON_ERROR": status.HTTP_502_BAD_GATEWAY,
    "API_ERROR": status.HTTP_502_BAD_GATEWAY,
    "STREAM_ERROR": status.HTTP_502_BAD_GATEWAY,
    "NO_RESPONSE": status.HTTP_502_BAD_GATEWAY,
    "TOKEN_SAVE_ERROR": status.HTTP_500_INTERNAL_SERVER_ERROR,
    "NO_AVAILABLE_TOKEN": status.HTTP_503_SERVICE_UNAVAILABLE,
}

# Maps internal Grok error codes to OpenAI-style error "type" strings.
GROK_TYPE_MAP = {
    "NO_AUTH_TOKEN": "authentication_error",
    "INVALID_TOKEN": "authentication_error",
    "HTTP_ERROR": "api_error",
    "NETWORK_ERROR": "api_error",
    "JSON_ERROR": "api_error",
    "API_ERROR": "api_error",
    "STREAM_ERROR": "api_error",
    "NO_RESPONSE": "api_error",
    "TOKEN_SAVE_ERROR": "api_error",
    "NO_AVAILABLE_TOKEN": "api_error",
}
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
class GrokApiException(Exception):
    """Business-level exception raised by Grok API integration code.

    Attributes:
        message: Human-readable description of the failure.
        error_code: Internal code (key into GROK_STATUS_MAP / GROK_TYPE_MAP).
        details: Extra structured data about the failure.
        context: Request/processing context, mainly for logging.
        status_code: Explicit HTTP status; when absent, derived from
            error_code via GROK_STATUS_MAP (may end up None).
    """

    def __init__(self, message: str, error_code: str = None, details: dict = None, context: dict = None, status_code: int = None):
        super().__init__(message)
        self.message = message
        self.error_code = error_code
        self.details = {} if details is None else details
        self.context = {} if context is None else context
        # `or` keeps the map lookup lazy: skipped entirely when an explicit
        # (truthy) status_code is supplied.
        self.status_code = status_code or GROK_STATUS_MAP.get(error_code)
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
def build_error_response(message: str, error_type: str, code: str = None, param: str = None) -> dict:
    """Build an OpenAI-compatible error payload: ``{"error": {...}}``.

    ``code`` and ``param`` are included only when truthy.
    """
    payload = {"message": message, "type": error_type}
    for key, value in (("code", code), ("param", param)):
        if value:
            payload[key] = value
    return {"error": payload}
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
async def http_exception_handler(_: Request, exc: StarletteHTTPException) -> JSONResponse:
    """Convert Starlette HTTP exceptions into OpenAI-style error JSON."""
    error_type, fallback = HTTP_ERROR_MAP.get(exc.status_code, ("api_error", str(exc.detail)))
    # Prefer the exception's own detail text when present.
    detail_text = str(exc.detail) if exc.detail else fallback
    body = build_error_response(detail_text, error_type)
    return JSONResponse(status_code=exc.status_code, content=body)
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
async def validation_exception_handler(_: Request, exc: RequestValidationError) -> JSONResponse:
    """Convert request-validation failures into a 400 OpenAI-style error."""
    errors = exc.errors()
    first = errors[0] if errors else {}
    # Surface the offending parameter and message from the first error only.
    param = first["loc"][-1] if first.get("loc") else None
    message = first["msg"] if first.get("msg") else "Invalid request parameters"
    return JSONResponse(
        status_code=status.HTTP_400_BAD_REQUEST,
        content=build_error_response(message, "invalid_request_error", param=param),
    )
|
| 93 |
+
|
| 94 |
+
|
| 95 |
+
async def grok_api_exception_handler(_: Request, exc: GrokApiException) -> JSONResponse:
    """Convert GrokApiException into an OpenAI-style error response."""
    mapped_status = GROK_STATUS_MAP.get(exc.error_code, status.HTTP_500_INTERNAL_SERVER_ERROR)
    mapped_type = GROK_TYPE_MAP.get(exc.error_code, "api_error")
    return JSONResponse(
        status_code=mapped_status,
        content=build_error_response(exc.message, mapped_type, exc.error_code),
    )
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
async def global_exception_handler(_: Request, exc: Exception) -> JSONResponse:
    """Last-resort handler: return a generic 500 without leaking internals."""
    body = build_error_response("Server encountered an unexpected error, please retry", "api_error")
    return JSONResponse(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, content=body)
|
| 112 |
+
|
| 113 |
+
|
| 114 |
+
def register_exception_handlers(app) -> None:
    """Wire all exception handlers onto the FastAPI app (order preserved)."""
    handlers = (
        (StarletteHTTPException, http_exception_handler),
        (RequestValidationError, validation_exception_handler),
        (GrokApiException, grok_api_exception_handler),
        (Exception, global_exception_handler),
    )
    for exc_type, handler in handlers:
        app.add_exception_handler(exc_type, handler)
|
app/core/logger.py
ADDED
|
@@ -0,0 +1,141 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Global logging module - singleton logger manager"""
|
| 2 |
+
|
| 3 |
+
import os
|
| 4 |
+
import sys
|
| 5 |
+
import logging
|
| 6 |
+
from pathlib import Path
|
| 7 |
+
from logging.handlers import RotatingFileHandler
|
| 8 |
+
|
| 9 |
+
from app.core.config import setting
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
# Substrings of SSE debug messages that are too noisy to keep.
FILTER_PATTERNS = [
    "chunk: b'",  # SSE raw bytes
    "Got event:",  # SSE event
    "Closing",  # SSE close
]


class MCPLogFilter(logging.Filter):
    """Drops noisy DEBUG records emitted by SSE and MCP transports."""

    def filter(self, record: logging.LogRecord) -> bool:
        """Return False to suppress the record, True to keep it."""
        is_debug = record.levelno == logging.DEBUG

        # sse_starlette DEBUG records are kept unless they match a noise pattern.
        if is_debug and record.name == "sse_starlette.sse":
            message = record.getMessage()
            return all(pattern not in message for pattern in FILTER_PATTERNS)

        # MCP streamable_http DEBUG records are dropped wholesale.
        if is_debug and "mcp.server.streamable_http" in record.name:
            return False

        return True
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
class LoggerManager:
    """Logger manager (singleton).

    Configures the root logger exactly once per process: a stdout handler
    plus a rotating file handler, both filtered by MCPLogFilter. Subsequent
    instantiations return the same object and skip re-initialization.
    """

    # Shared singleton state (class-level on purpose).
    _instance = None
    _initialized = False

    def __new__(cls):
        # Classic singleton: reuse the one instance across all constructions.
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        return cls._instance

    def __init__(self):
        """Initialize logging system (no-op after the first call)."""
        if LoggerManager._initialized:
            return

        # Configuration: pick the base dir from DATA_DIR, then /data (container
        # convention), then the repository root (two levels above this file).
        data_dir_env = os.getenv("DATA_DIR")
        if data_dir_env:
            base_dir = Path(data_dir_env)
        elif Path("/data").exists():
            base_dir = Path("/data")
        else:
            base_dir = Path(__file__).parents[2]

        log_dir = base_dir / "logs"
        log_dir.mkdir(parents=True, exist_ok=True)
        # Level comes from the loaded global config; defaults to INFO.
        log_level = setting.global_config.get("log_level", "INFO").upper()
        log_format = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
        log_file = log_dir / "app.log"

        # Root logger
        self.logger = logging.getLogger()
        self.logger.setLevel(log_level)

        # Replace existing handlers to ensure logs show in hosted environments
        # (some platforms pre-install their own root handlers). Handlers are
        # removed first, then closed, over a copy of the list.
        if self.logger.handlers:
            for handler in list(self.logger.handlers):
                self.logger.removeHandler(handler)
                handler.close()

        # Formatter and filter
        formatter = logging.Formatter(log_format)
        mcp_filter = MCPLogFilter()

        # Console handler
        console = logging.StreamHandler(sys.stdout)
        console.setLevel(log_level)
        console.setFormatter(formatter)
        console.addFilter(mcp_filter)

        # File handler (10MB, 5 backups)
        file_handler = RotatingFileHandler(
            log_file, maxBytes=10*1024*1024, backupCount=5, encoding="utf-8"
        )
        file_handler.setLevel(log_level)
        file_handler.setFormatter(formatter)
        file_handler.addFilter(mcp_filter)

        # Add handlers
        self.logger.addHandler(console)
        self.logger.addHandler(file_handler)

        # Configure third-party loggers
        self._configure_third_party()

        LoggerManager._initialized = True

    def _configure_third_party(self):
        """Configure third-party log levels (quiet the chatty libraries)."""
        config = {
            "asyncio": logging.WARNING,
            "uvicorn": logging.INFO,
            "fastapi": logging.INFO,
            "aiomysql": logging.WARNING,
            "mcp": logging.CRITICAL,
            "fastmcp": logging.CRITICAL,
        }

        for name, level in config.items():
            logging.getLogger(name).setLevel(level)

    # Thin convenience wrappers around the root logger.

    def debug(self, msg: str) -> None:
        """Debug log"""
        self.logger.debug(msg)

    def info(self, msg: str) -> None:
        """Info log"""
        self.logger.info(msg)

    def warning(self, msg: str) -> None:
        """Warning log"""
        self.logger.warning(msg)

    def error(self, msg: str) -> None:
        """Error log"""
        self.logger.error(msg)

    def critical(self, msg: str) -> None:
        """Critical log"""
        self.logger.critical(msg)
|
| 138 |
+
|
| 139 |
+
|
| 140 |
+
# Global singleton; importing this module configures logging as a side effect.
logger = LoggerManager()
|
app/core/proxy_pool.py
ADDED
|
@@ -0,0 +1,170 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Proxy pool manager - fetch proxy IPs dynamically from a URL"""
|
| 2 |
+
|
| 3 |
+
import asyncio
|
| 4 |
+
import aiohttp
|
| 5 |
+
import time
|
| 6 |
+
from typing import Optional, List
|
| 7 |
+
from app.core.logger import logger
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class ProxyPool:
    """Proxy pool manager.

    Supports either a single static proxy or a pool API endpoint that returns
    one proxy address per request. Pooled proxies are cached and refreshed on
    a configurable interval; any fetch failure falls back to the static proxy.
    """

    def __init__(self):
        self._pool_url: Optional[str] = None        # pool API endpoint (None = pool disabled)
        self._static_proxy: Optional[str] = None    # fixed proxy / fallback
        self._current_proxy: Optional[str] = None   # most recently resolved proxy
        self._last_fetch_time: float = 0
        self._fetch_interval: int = 300  # Refresh every 5 minutes
        self._enabled: bool = False
        self._lock = asyncio.Lock()

    def configure(self, proxy_url: str, proxy_pool_url: str = "", proxy_pool_interval: int = 300):
        """Configure proxy pool

        Args:
            proxy_url: Static proxy URL (socks5h://xxx or http://xxx)
            proxy_pool_url: Proxy pool API URL returning a single proxy address
            proxy_pool_interval: Proxy pool refresh interval (seconds)
        """
        self._static_proxy = self._normalize_proxy(proxy_url) if proxy_url else None
        pool_url = proxy_pool_url.strip() if proxy_pool_url else None

        # Guard against misconfiguration: a socks5-style URL in proxy_pool_url
        # is a proxy address, not a pool API endpoint.
        if pool_url and self._looks_like_proxy_url(pool_url):
            normalized_proxy = self._normalize_proxy(pool_url)
            if not self._static_proxy:
                self._static_proxy = normalized_proxy
                logger.warning("[ProxyPool] proxy_pool_url looks like a proxy address; using as static proxy. Use proxy_url instead.")
            else:
                logger.warning("[ProxyPool] proxy_pool_url looks like a proxy address; ignored (using proxy_url).")
            pool_url = None

        self._pool_url = pool_url
        self._fetch_interval = proxy_pool_interval
        self._enabled = bool(self._pool_url)

        if self._enabled:
            logger.info(f"[ProxyPool] Proxy pool enabled: {self._pool_url}, refresh interval: {self._fetch_interval}s")
        elif self._static_proxy:
            logger.info(f"[ProxyPool] Using static proxy: {self._static_proxy}")
            self._current_proxy = self._static_proxy
        else:
            logger.info("[ProxyPool] No proxy configured")

    def _needs_refresh(self) -> bool:
        """True when no proxy is cached or the refresh interval has elapsed."""
        return not self._current_proxy or (time.time() - self._last_fetch_time) >= self._fetch_interval

    async def get_proxy(self) -> Optional[str]:
        """Get proxy address

        Returns:
            Proxy URL or None
        """
        # If proxy pool is disabled, return static proxy
        if not self._enabled:
            return self._static_proxy

        if self._needs_refresh():
            async with self._lock:
                # Re-evaluate with a FRESH timestamp: another task may have
                # refreshed the proxy while we waited on the lock. (Reusing a
                # timestamp captured before the await would make concurrent
                # callers refetch redundantly.)
                if self._needs_refresh():
                    await self._fetch_proxy()

        return self._current_proxy

    async def force_refresh(self) -> Optional[str]:
        """Force refresh proxy (for 403 retry)

        Returns:
            New proxy URL or None
        """
        if not self._enabled:
            return self._static_proxy

        async with self._lock:
            await self._fetch_proxy()

        return self._current_proxy

    def _fallback_to_static(self) -> None:
        """Use the static proxy if no pooled proxy has been obtained yet."""
        if not self._current_proxy:
            self._current_proxy = self._static_proxy

    async def _fetch_proxy(self):
        """Fetch a new proxy from the proxy pool URL (caller holds the lock)."""
        try:
            logger.debug(f"[ProxyPool] Fetching new proxy from pool: {self._pool_url}")

            timeout = aiohttp.ClientTimeout(total=10)
            async with aiohttp.ClientSession(timeout=timeout) as session:
                async with session.get(self._pool_url) as response:
                    if response.status == 200:
                        proxy_text = await response.text()
                        proxy = self._normalize_proxy(proxy_text.strip())

                        # Validate proxy format
                        if self._validate_proxy(proxy):
                            self._current_proxy = proxy
                            self._last_fetch_time = time.time()
                            logger.info(f"[ProxyPool] Successfully fetched new proxy: {proxy}")
                        else:
                            logger.error(f"[ProxyPool] Invalid proxy format: {proxy}")
                            self._fallback_to_static()
                    else:
                        logger.error(f"[ProxyPool] Failed to fetch proxy: HTTP {response.status}")
                        self._fallback_to_static()

        except asyncio.TimeoutError:
            logger.error("[ProxyPool] Proxy fetch timed out")
            self._fallback_to_static()

        except Exception as e:
            logger.error(f"[ProxyPool] Proxy fetch error: {e}")
            self._fallback_to_static()

    def _validate_proxy(self, proxy: str) -> bool:
        """Validate proxy format

        Args:
            proxy: Proxy URL

        Returns:
            True if valid
        """
        if not proxy:
            return False

        # Supported protocols
        valid_protocols = ['http://', 'https://', 'socks5://', 'socks5h://']

        return any(proxy.startswith(proto) for proto in valid_protocols)

    def _normalize_proxy(self, proxy: str) -> str:
        """Normalize proxy URL (sock5/socks5 -> socks5h://).

        socks5h ensures DNS resolution happens through the proxy.
        """
        if not proxy:
            return proxy

        proxy = proxy.strip()
        if proxy.startswith("sock5h://"):
            proxy = proxy.replace("sock5h://", "socks5h://", 1)
        if proxy.startswith("sock5://"):
            proxy = proxy.replace("sock5://", "socks5://", 1)
        if proxy.startswith("socks5://"):
            return proxy.replace("socks5://", "socks5h://", 1)
        return proxy

    def _looks_like_proxy_url(self, url: str) -> bool:
        """Check if URL looks like a proxy address (avoid mistaking pool API for proxy)"""
        return url.startswith(("sock5://", "sock5h://", "socks5://", "socks5h://"))

    def get_current_proxy(self) -> Optional[str]:
        """Get current proxy (sync)

        Returns:
            Current proxy URL or None
        """
        return self._current_proxy or self._static_proxy
|
| 167 |
+
|
| 168 |
+
|
| 169 |
+
# Global proxy pool instance shared by config and HTTP clients.
proxy_pool = ProxyPool()
|
app/core/storage.py
ADDED
|
@@ -0,0 +1,644 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Storage abstraction - supports file, MySQL, and Redis storage"""
|
| 2 |
+
|
| 3 |
+
import os
|
| 4 |
+
import orjson
|
| 5 |
+
import toml
|
| 6 |
+
import asyncio
|
| 7 |
+
import warnings
|
| 8 |
+
import aiofiles
|
| 9 |
+
from pathlib import Path
|
| 10 |
+
from typing import Dict, Any, Optional, Literal
|
| 11 |
+
from abc import ABC, abstractmethod
|
| 12 |
+
from urllib.parse import urlparse, unquote
|
| 13 |
+
|
| 14 |
+
from app.core.logger import logger
|
| 15 |
+
from app.core.config import DEFAULT_GROK, DEFAULT_GLOBAL
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
# Accepted backend identifiers; "hf"/"hub"/"dataset" are HuggingFace variants.
StorageMode = Literal["file", "mysql", "redis", "hf", "hub", "dataset"]


class BaseStorage(ABC):
    """Storage base class - the async contract every backend must implement.

    Backends persist three independent datasets: Grok tokens, app config,
    and API keys.
    """

    @abstractmethod
    async def init_db(self) -> None:
        """Initialize database"""
        pass

    @abstractmethod
    async def load_tokens(self) -> Dict[str, Any]:
        """Load token data"""
        pass

    @abstractmethod
    async def save_tokens(self, data: Dict[str, Any]) -> None:
        """Save token data"""
        pass

    @abstractmethod
    async def load_config(self) -> Dict[str, Any]:
        """Load config data"""
        pass

    @abstractmethod
    async def save_config(self, data: Dict[str, Any]) -> None:
        """Save config data"""
        pass

    @abstractmethod
    async def load_api_keys(self) -> list:
        """Load API keys data"""
        pass

    @abstractmethod
    async def save_api_keys(self, data: list) -> None:
        """Save API keys data"""
        pass
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
class FileStorage(BaseStorage):
|
| 61 |
+
"""File storage"""
|
| 62 |
+
|
| 63 |
+
    def __init__(self, data_dir: Path):
        """Bind file paths under *data_dir*; no I/O happens until init_db()."""
        self.data_dir = data_dir
        self.token_file = data_dir / "token.json"
        self.config_file = data_dir / "setting.toml"
        self.api_keys_file = data_dir / "api_keys.json"
        # One lock per file so concurrent writers never interleave.
        self._token_lock = asyncio.Lock()
        self._config_lock = asyncio.Lock()
        self._api_keys_lock = asyncio.Lock()
|
| 71 |
+
|
| 72 |
+
async def init_db(self) -> None:
|
| 73 |
+
"""Initialize file storage"""
|
| 74 |
+
self.data_dir.mkdir(parents=True, exist_ok=True)
|
| 75 |
+
|
| 76 |
+
if not self.token_file.exists():
|
| 77 |
+
await self._write(self.token_file, orjson.dumps({"sso": {}, "ssoSuper": {}}, option=orjson.OPT_INDENT_2).decode())
|
| 78 |
+
logger.info("[Storage] Created token file")
|
| 79 |
+
|
| 80 |
+
if not self.config_file.exists():
|
| 81 |
+
default_global = DEFAULT_GLOBAL.copy()
|
| 82 |
+
base_url = os.getenv("BASE_URL")
|
| 83 |
+
if base_url:
|
| 84 |
+
default_global["base_url"] = base_url
|
| 85 |
+
default = {
|
| 86 |
+
"global": default_global,
|
| 87 |
+
"grok": DEFAULT_GROK.copy()
|
| 88 |
+
}
|
| 89 |
+
await self._write(self.config_file, toml.dumps(default))
|
| 90 |
+
logger.info("[Storage] Created config file")
|
| 91 |
+
|
| 92 |
+
if not self.api_keys_file.exists():
|
| 93 |
+
await self._write(self.api_keys_file, "[]")
|
| 94 |
+
logger.info("[Storage] Created API keys file")
|
| 95 |
+
|
| 96 |
+
    async def _read(self, path: Path) -> str:
        """Read the whole file at *path* as UTF-8 text."""
        async with aiofiles.open(path, "r", encoding="utf-8") as f:
            return await f.read()
|
| 100 |
+
|
| 101 |
+
    async def _write(self, path: Path, content: str) -> None:
        """Overwrite the file at *path* with UTF-8 *content*."""
        async with aiofiles.open(path, "w", encoding="utf-8") as f:
            await f.write(content)
|
| 105 |
+
|
| 106 |
+
async def _load_json(self, path: Path, default: Dict, lock: asyncio.Lock) -> Dict[str, Any]:
|
| 107 |
+
"""Load JSON"""
|
| 108 |
+
try:
|
| 109 |
+
async with lock:
|
| 110 |
+
if not path.exists():
|
| 111 |
+
return default
|
| 112 |
+
return orjson.loads(await self._read(path))
|
| 113 |
+
except Exception as e:
|
| 114 |
+
logger.error(f"[Storage] Failed to load {path.name}: {e}")
|
| 115 |
+
return default
|
| 116 |
+
|
| 117 |
+
async def _save_json(self, path: Path, data: Dict, lock: asyncio.Lock) -> None:
|
| 118 |
+
"""Save JSON"""
|
| 119 |
+
try:
|
| 120 |
+
async with lock:
|
| 121 |
+
await self._write(path, orjson.dumps(data, option=orjson.OPT_INDENT_2).decode())
|
| 122 |
+
except Exception as e:
|
| 123 |
+
logger.error(f"[Storage] Failed to save {path.name}: {e}")
|
| 124 |
+
raise
|
| 125 |
+
|
| 126 |
+
async def _load_toml(self, path: Path, default: Dict, lock: asyncio.Lock) -> Dict[str, Any]:
|
| 127 |
+
"""Load TOML"""
|
| 128 |
+
try:
|
| 129 |
+
async with lock:
|
| 130 |
+
if not path.exists():
|
| 131 |
+
return default
|
| 132 |
+
return toml.loads(await self._read(path))
|
| 133 |
+
except Exception as e:
|
| 134 |
+
logger.error(f"[Storage] Failed to load {path.name}: {e}")
|
| 135 |
+
return default
|
| 136 |
+
|
| 137 |
+
async def _save_toml(self, path: Path, data: Dict, lock: asyncio.Lock) -> None:
|
| 138 |
+
"""Save TOML"""
|
| 139 |
+
try:
|
| 140 |
+
async with lock:
|
| 141 |
+
await self._write(path, toml.dumps(data))
|
| 142 |
+
except Exception as e:
|
| 143 |
+
logger.error(f"[Storage] Failed to save {path.name}: {e}")
|
| 144 |
+
raise
|
| 145 |
+
|
| 146 |
+
async def load_tokens(self) -> Dict[str, Any]:
|
| 147 |
+
"""Load tokens"""
|
| 148 |
+
return await self._load_json(self.token_file, {"sso": {}, "ssoSuper": {}}, self._token_lock)
|
| 149 |
+
|
| 150 |
+
async def save_tokens(self, data: Dict[str, Any]) -> None:
|
| 151 |
+
"""Save tokens"""
|
| 152 |
+
await self._save_json(self.token_file, data, self._token_lock)
|
| 153 |
+
|
| 154 |
+
async def load_config(self) -> Dict[str, Any]:
|
| 155 |
+
"""Load config"""
|
| 156 |
+
return await self._load_toml(self.config_file, {"global": {}, "grok": {}}, self._config_lock)
|
| 157 |
+
|
| 158 |
+
async def save_config(self, data: Dict[str, Any]) -> None:
|
| 159 |
+
"""Save config"""
|
| 160 |
+
await self._save_toml(self.config_file, data, self._config_lock)
|
| 161 |
+
|
| 162 |
+
async def load_api_keys(self) -> list:
|
| 163 |
+
"""Load API keys"""
|
| 164 |
+
return await self._load_json(self.api_keys_file, [], self._api_keys_lock)
|
| 165 |
+
|
| 166 |
+
async def save_api_keys(self, data: list) -> None:
|
| 167 |
+
"""Save API keys"""
|
| 168 |
+
await self._save_json(self.api_keys_file, data, self._api_keys_lock)
|
| 169 |
+
|
| 170 |
+
|
| 171 |
+
class MysqlStorage(BaseStorage):
    """MySQL-backed storage.

    Local files (via an embedded FileStorage) remain the read path; MySQL
    acts as a durable mirror that wins over local files on startup.
    Each table holds a single JSON row that is updated in place.
    """

    def __init__(self, database_url: str, data_dir: Path):
        self.database_url = database_url
        self.data_dir = data_dir
        self._pool = None  # aiomysql pool, created in init_db()
        self._file = FileStorage(data_dir)  # all reads are served from disk

    async def init_db(self) -> None:
        """Create the database/tables, open the pool, then sync with local files.

        Raises:
            Exception: when aiomysql is missing or any initialization step fails.
        """
        try:
            import aiomysql
            parsed = self._parse_url(self.database_url)
            logger.info(f"[Storage] MySQL: {parsed['user']}@{parsed['host']}:{parsed['port']}/{parsed['db']}")

            await self._create_db(parsed)
            self._pool = await aiomysql.create_pool(
                host=parsed['host'], port=parsed['port'], user=parsed['user'],
                password=parsed['password'], db=parsed['db'], charset="utf8mb4",
                autocommit=True, maxsize=10
            )
            await self._create_tables()
            await self._file.init_db()
            await self._sync_data()

        except ImportError:
            raise Exception("aiomysql not installed")
        except Exception as e:
            logger.error(f"[Storage] MySQL initialization failed: {e}")
            raise

    def _parse_url(self, url: str) -> Dict[str, Any]:
        """Split a mysql://user:pass@host:port/db URL into its parts.

        Credentials are percent-decoded; missing port/db fall back to
        3306 and "grok2api".
        """
        p = urlparse(url)
        return {
            'user': unquote(p.username) if p.username else "",
            'password': unquote(p.password) if p.password else "",
            'host': p.hostname,
            'port': p.port or 3306,
            'db': p.path[1:] if p.path else "grok2api"
        }

    async def _create_db(self, parsed: Dict) -> None:
        """Create the target database if it does not exist yet.

        Uses a throwaway single-connection pool because the main pool is
        bound to the (possibly not-yet-existing) database.
        """
        import aiomysql
        pool = await aiomysql.create_pool(
            host=parsed['host'], port=parsed['port'], user=parsed['user'],
            password=parsed['password'], charset="utf8mb4", autocommit=True, maxsize=1
        )

        try:
            async with pool.acquire() as conn:
                async with conn.cursor() as cursor:
                    with warnings.catch_warnings():
                        # MySQL emits a warning (not an error) when the DB exists.
                        warnings.filterwarnings('ignore', message='.*database exists')
                        # Safe to interpolate: db name comes from our own URL parse,
                        # not from user-facing input, and is backtick-quoted.
                        await cursor.execute(
                            f"CREATE DATABASE IF NOT EXISTS `{parsed['db']}` "
                            f"CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci"
                        )
            logger.info(f"[Storage] Database '{parsed['db']}' ready")
        finally:
            pool.close()
            await pool.wait_closed()

    async def _create_tables(self) -> None:
        """Create the single-row JSON tables used as mirrors (idempotent)."""
        tables = {
            "grok_tokens": """
                CREATE TABLE IF NOT EXISTS grok_tokens (
                    id INT AUTO_INCREMENT PRIMARY KEY,
                    data JSON NOT NULL,
                    updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
                    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
                ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4
            """,
            "grok_settings": """
                CREATE TABLE IF NOT EXISTS grok_settings (
                    id INT AUTO_INCREMENT PRIMARY KEY,
                    data JSON NOT NULL,
                    updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
                    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
                ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4
            """
        }

        async with self._pool.acquire() as conn:
            async with conn.cursor() as cursor:
                with warnings.catch_warnings():
                    warnings.filterwarnings('ignore', message='.*already exists')
                    for sql in tables.values():
                        await cursor.execute(sql)
        logger.info("[Storage] MySQL tables ready")

    async def _sync_data(self) -> None:
        """One-way startup sync: DB wins when populated, else seed DB from files.

        Sync failures are logged but non-fatal so the service can still start
        from local files.
        """
        try:
            # NOTE(review): the "normal" token bucket key checked here is "sso";
            # other modules use "ssoNormal" -- confirm which schema is canonical.
            for table, key in [("grok_tokens", "sso"), ("grok_settings", "global")]:
                data = await self._load_db(table)
                if data:
                    if table == "grok_tokens":
                        await self._file.save_tokens(data)
                    else:
                        await self._file.save_config(data)
                    logger.info(f"[Storage] {table.split('_')[1]} data synced from DB")
                else:
                    file_data = await (self._file.load_tokens() if table == "grok_tokens" else self._file.load_config())
                    # Only push non-empty local data (tokens also count "ssoSuper").
                    if file_data.get(key) or (table == "grok_tokens" and file_data.get("ssoSuper")):
                        await self._save_db(table, file_data)
                        logger.info(f"[Storage] {table.split('_')[1]} data initialized to DB")
        except Exception as e:
            logger.warning(f"[Storage] Sync failed: {e}")

    async def _load_db(self, table: str) -> Optional[Dict]:
        """Return the latest JSON row from *table*, or None when empty/on error.

        *table* is always one of our internal constants, so the f-string SQL
        carries no injection risk.
        """
        try:
            async with self._pool.acquire() as conn:
                async with conn.cursor() as cursor:
                    await cursor.execute(f"SELECT data FROM {table} ORDER BY id DESC LIMIT 1")
                    result = await cursor.fetchone()
                    return orjson.loads(result[0]) if result else None
        except Exception as e:
            logger.error(f"[Storage] Failed to load {table}: {e}")
            return None

    async def _save_db(self, table: str, data: Dict) -> None:
        """Upsert *data* as the single JSON row of *table* (update latest, else insert)."""
        try:
            async with self._pool.acquire() as conn:
                async with conn.cursor() as cursor:
                    json_data = orjson.dumps(data).decode()
                    await cursor.execute(f"SELECT id FROM {table} ORDER BY id DESC LIMIT 1")
                    result = await cursor.fetchone()

                    if result:
                        await cursor.execute(f"UPDATE {table} SET data = %s WHERE id = %s", (json_data, result[0]))
                    else:
                        await cursor.execute(f"INSERT INTO {table} (data) VALUES (%s)", (json_data,))
        except Exception as e:
            logger.error(f"[Storage] Failed to save {table}: {e}")
            raise

    async def load_tokens(self) -> Dict[str, Any]:
        """Load tokens from the local file mirror (DB was synced at startup)."""
        return await self._file.load_tokens()

    async def save_tokens(self, data: Dict[str, Any]) -> None:
        """Save tokens: write-through to local file first, then mirror to MySQL."""
        await self._file.save_tokens(data)
        await self._save_db("grok_tokens", data)

    async def load_config(self) -> Dict[str, Any]:
        """Load config from the local file mirror (DB was synced at startup)."""
        return await self._file.load_config()

    async def save_config(self, data: Dict[str, Any]) -> None:
        """Save config: write-through to local file first, then mirror to MySQL."""
        await self._file.save_config(data)
        await self._save_db("grok_settings", data)

    async def load_api_keys(self) -> list:
        """Load API keys (file-only -- no MySQL table backs API keys)."""
        return await self._file.load_api_keys()

    async def save_api_keys(self, data: list) -> None:
        """Save API keys (file-only -- no MySQL table backs API keys)."""
        await self._file.save_api_keys(data)

    async def close(self) -> None:
        """Close the connection pool and wait for all connections to drain."""
        if self._pool:
            self._pool.close()
            await self._pool.wait_closed()
            logger.info("[Storage] MySQL closed")
|
| 345 |
+
|
| 346 |
+
|
| 347 |
+
class RedisStorage(BaseStorage):
    """Redis-backed storage.

    Local files (via an embedded FileStorage) remain the read path; Redis
    acts as a durable mirror that wins over local files on startup.
    """

    def __init__(self, redis_url: str, data_dir: Path):
        self.redis_url = redis_url
        self.data_dir = data_dir
        self._redis = None  # async Redis client, created in init_db()
        self._file = FileStorage(data_dir)  # all reads are served from disk

    async def init_db(self) -> None:
        """Connect to Redis, verify with PING, then sync with local files.

        Raises:
            Exception: when the redis package is missing or connection fails.
        """
        try:
            import redis.asyncio as aioredis
            parsed = urlparse(self.redis_url)
            # The logical DB index comes from the URL path ("/2" -> db 2).
            db = int(parsed.path.lstrip('/')) if parsed.path and parsed.path != '/' else 0
            logger.info(f"[Storage] Redis: {parsed.hostname}:{parsed.port or 6379}/{db}")

            self._redis = aioredis.Redis.from_url(
                self.redis_url, encoding="utf-8", decode_responses=True
            )

            await self._redis.ping()
            logger.info(f"[Storage] Redis connection successful")

            await self._file.init_db()
            await self._sync_data()

        except ImportError:
            raise Exception("redis not installed")
        except Exception as e:
            logger.error(f"[Storage] Redis initialization failed: {e}")
            raise

    async def _sync_data(self) -> None:
        """One-way startup sync: Redis wins when populated, else seed Redis from files.

        Sync failures are logged but non-fatal so the service can still start
        from local files.
        """
        try:
            # NOTE(review): the "normal" token bucket key checked here is "sso";
            # other modules use "ssoNormal" -- confirm which schema is canonical.
            for key, file_func, key_name in [
                ("grok:tokens", self._file.load_tokens, "sso"),
                ("grok:settings", self._file.load_config, "global")
            ]:
                data = await self._redis.get(key)
                if data:
                    parsed = orjson.loads(data)
                    if key == "grok:tokens":
                        await self._file.save_tokens(parsed)
                    else:
                        await self._file.save_config(parsed)
                    logger.info(f"[Storage] {key.split(':')[1]} data synced from Redis")
                else:
                    file_data = await file_func()
                    # Only push non-empty local data (tokens also count "ssoSuper").
                    if file_data.get(key_name) or (key == "grok:tokens" and file_data.get("ssoSuper")):
                        await self._redis.set(key, orjson.dumps(file_data).decode())
                        logger.info(f"[Storage] {key.split(':')[1]} data initialized to Redis")
        except Exception as e:
            logger.warning(f"[Storage] Sync failed: {e}")

    async def _save_redis(self, key: str, data: Dict) -> None:
        """Serialize *data* as JSON and SET it under *key*; re-raises on failure."""
        try:
            await self._redis.set(key, orjson.dumps(data).decode())
        except Exception as e:
            logger.error(f"[Storage] Failed to save to Redis: {e}")
            raise

    async def load_tokens(self) -> Dict[str, Any]:
        """Load tokens from the local file mirror (Redis was synced at startup)."""
        return await self._file.load_tokens()

    async def save_tokens(self, data: Dict[str, Any]) -> None:
        """Save tokens: write-through to local file first, then mirror to Redis."""
        await self._file.save_tokens(data)
        await self._save_redis("grok:tokens", data)

    async def load_config(self) -> Dict[str, Any]:
        """Load config from the local file mirror (Redis was synced at startup)."""
        return await self._file.load_config()

    async def save_config(self, data: Dict[str, Any]) -> None:
        """Save config: write-through to local file first, then mirror to Redis."""
        await self._file.save_config(data)
        await self._save_redis("grok:settings", data)

    async def load_api_keys(self) -> list:
        """Load API keys (file-only -- not mirrored to Redis)."""
        return await self._file.load_api_keys()

    async def save_api_keys(self, data: list) -> None:
        """Save API keys (file-only -- not mirrored to Redis)."""
        await self._file.save_api_keys(data)

    async def close(self) -> None:
        """Close the Redis client connection."""
        if self._redis:
            await self._redis.close()
            logger.info("[Storage] Redis closed")
|
| 442 |
+
|
| 443 |
+
|
| 444 |
+
class StorageManager:
    """Process-wide singleton that selects and owns the storage backend.

    The backend is chosen from the STORAGE_MODE env var:
    "file" (default), "mysql", "redis", or "hf"/"hub"/"dataset".
    Unknown values fall back to file storage.
    """

    # Annotations are quoted (lazy) so this class does not depend on the
    # definition order of the storage classes in this module.
    _instance: Optional['StorageManager'] = None
    _storage: Optional['BaseStorage'] = None
    _initialized: bool = False

    def __new__(cls):
        # Classic singleton: every construction returns the same instance.
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        return cls._instance

    async def init(self) -> None:
        """Select, construct and initialize the backend (idempotent).

        Raises:
            ValueError: when the chosen mode is missing its required env var.
        """
        if self._initialized:
            return

        mode = os.getenv("STORAGE_MODE", "file").lower()
        url = os.getenv("DATABASE_URL", "")
        data_dir = self._resolve_data_dir()

        if mode in ("mysql", "redis") and not url:
            raise ValueError(f"{mode.upper()} mode requires DATABASE_URL")

        if mode in ("hf", "hub", "dataset"):
            repo_id = os.getenv("HF_DATASET_REPO", "")
            if not repo_id:
                raise ValueError("HF/DATASET mode requires HF_DATASET_REPO")
            self._storage = HfDatasetStorage(repo_id, data_dir)
        elif mode == "mysql":
            self._storage = MysqlStorage(url, data_dir)
        elif mode == "redis":
            self._storage = RedisStorage(url, data_dir)
        else:
            # Fix: previously an *unknown* mode fell back to FileStorage but
            # was constructed as FileStorage(url, data_dir), raising TypeError.
            # Unknown modes now genuinely fall back to file storage.
            self._storage = FileStorage(data_dir)

        await self._storage.init_db()
        self._initialized = True
        logger.info(f"[Storage] Using {mode} mode")

    @staticmethod
    def _resolve_data_dir() -> Path:
        """Resolve the data directory: DATA_DIR env > /data volume > <repo>/data."""
        env_dir = os.getenv("DATA_DIR")
        if env_dir:
            return Path(env_dir)
        if Path("/data").exists():
            return Path("/data")
        return Path(__file__).parents[2] / "data"

    def get_storage(self) -> 'BaseStorage':
        """Return the active backend.

        Raises:
            RuntimeError: when init() has not completed yet.
        """
        if not self._initialized or not self._storage:
            raise RuntimeError("StorageManager not initialized")
        return self._storage

    async def close(self) -> None:
        """Close the backend if it exposes a close() coroutine (file mode has none)."""
        if self._storage and hasattr(self._storage, 'close'):
            await self._storage.close()
|
| 499 |
+
|
| 500 |
+
|
| 501 |
+
# Global singleton instance; call `await storage_manager.init()` during app
# startup before any `storage_manager.get_storage()` access.
storage_manager = StorageManager()
|
| 503 |
+
|
| 504 |
+
|
| 505 |
+
class HfDatasetStorage(BaseStorage):
    """Hugging Face dataset-backed storage.

    Each resource is mirrored as one file in a (private) dataset repo on the
    Hub: every save uploads a new commit, every load downloads the latest
    revision. Blocking hub calls run in worker threads via asyncio.to_thread.
    """

    def __init__(self, repo_id: str, data_dir: Path):
        from huggingface_hub import HfApi

        self.repo_id = repo_id
        self.data_dir = data_dir
        self.data_dir.mkdir(parents=True, exist_ok=True)
        self.config_file = data_dir / "setting.toml"
        # Prefer HF_TOKEN, fall back to the older env var name.
        self._token = os.getenv("HF_TOKEN") or os.getenv("HUGGINGFACE_HUB_TOKEN")
        self._api = HfApi(token=self._token)
        # One lock per remote file so concurrent saves never race on commits.
        self._locks = {
            "token.json": asyncio.Lock(),
            "setting.toml": asyncio.Lock(),
            "api_keys.json": asyncio.Lock(),
        }

    async def init_db(self) -> None:
        """Create the dataset repo if missing and seed default files.

        Raises:
            HfHubHTTPError: when repo creation fails (e.g. bad token/permissions).
        """
        from huggingface_hub.utils import HfHubHTTPError

        try:
            await asyncio.to_thread(
                self._api.create_repo,
                repo_id=self.repo_id,
                repo_type="dataset",
                private=True,
                exist_ok=True,
            )
        except HfHubHTTPError as e:
            logger.error(f"[Storage] Dataset init failed: {e}")
            raise

        await self._ensure_default_files()

    async def _ensure_default_files(self) -> None:
        """Ensure each backing file exists in the dataset repo, seeding defaults."""
        default_global = DEFAULT_GLOBAL.copy()
        base_url = os.getenv("BASE_URL")
        if base_url:
            default_global["base_url"] = base_url
        # NOTE(review): the token buckets here are "ssoNormal"/"ssoSuper" while
        # FileStorage seeds "sso"/"ssoSuper" -- confirm which schema is canonical.
        defaults = {
            "setting.toml": toml.dumps({"global": default_global, "grok": DEFAULT_GROK.copy()}),
            "token.json": orjson.dumps(
                {"ssoNormal": {}, "ssoSuper": {}},
                option=orjson.OPT_INDENT_2
            ).decode(),
            "api_keys.json": "[]",
        }

        for filename, content in defaults.items():
            await self._ensure_file(filename, content)

    async def _ensure_file(self, filename: str, content: str) -> None:
        """Create *filename* in the dataset with *content* if it does not exist."""
        existing = await self._download_text(filename)
        if existing is None:
            # Fix: the commit message f-string previously contained no
            # placeholder, so every seed commit read the same literal text.
            await self._upload_text(filename, content, f"Initialize {filename}")

    async def _download_text(self, filename: str) -> Optional[str]:
        """Download *filename* from the dataset; None when the file is absent."""
        from huggingface_hub import hf_hub_download
        from huggingface_hub.utils import EntryNotFoundError, HfHubHTTPError

        def download_sync() -> Optional[str]:
            try:
                path = hf_hub_download(
                    repo_id=self.repo_id,
                    repo_type="dataset",
                    filename=filename,
                    token=self._token,
                )
                return Path(path).read_text(encoding="utf-8")
            except EntryNotFoundError:
                return None
            except HfHubHTTPError as e:
                # Some hub versions surface "missing file" as a plain 404.
                status = getattr(e.response, "status_code", None)
                if status == 404:
                    return None
                raise

        return await asyncio.to_thread(download_sync)

    async def _upload_text(self, filename: str, content: str, message: str) -> None:
        """Write *content* to the local mirror, then commit it to the dataset."""
        def upload_sync() -> None:
            path = self.data_dir / filename
            path.parent.mkdir(parents=True, exist_ok=True)
            path.write_text(content, encoding="utf-8")
            self._api.upload_file(
                path_or_fileobj=str(path),
                path_in_repo=filename,
                repo_id=self.repo_id,
                repo_type="dataset",
                commit_message=message,
            )

        await asyncio.to_thread(upload_sync)

    async def load_tokens(self) -> Dict[str, Any]:
        """Load token data from the dataset; empty buckets when absent."""
        async with self._locks["token.json"]:
            content = await self._download_text("token.json")
            if not content:
                return {"ssoNormal": {}, "ssoSuper": {}}
            return orjson.loads(content)

    async def save_tokens(self, data: Dict[str, Any]) -> None:
        """Persist token data to the dataset as pretty-printed JSON."""
        async with self._locks["token.json"]:
            content = orjson.dumps(data, option=orjson.OPT_INDENT_2).decode()
            await self._upload_text("token.json", content, "Update token.json")

    async def load_config(self) -> Dict[str, Any]:
        """Load config data from the dataset; empty sections when absent."""
        async with self._locks["setting.toml"]:
            content = await self._download_text("setting.toml")
            if not content:
                return {"global": {}, "grok": {}}
            return toml.loads(content)

    async def save_config(self, data: Dict[str, Any]) -> None:
        """Persist config data to the dataset as TOML."""
        async with self._locks["setting.toml"]:
            await self._upload_text("setting.toml", toml.dumps(data), "Update setting.toml")

    async def load_api_keys(self) -> list:
        """Load the API key list from the dataset; empty list when absent."""
        async with self._locks["api_keys.json"]:
            content = await self._download_text("api_keys.json")
            if not content:
                return []
            return orjson.loads(content)

    async def save_api_keys(self, data: list) -> None:
        """Persist the API key list to the dataset as pretty-printed JSON."""
        async with self._locks["api_keys.json"]:
            content = orjson.dumps(data, option=orjson.OPT_INDENT_2).decode()
            await self._upload_text("api_keys.json", content, "Update api_keys.json")
|
app/models/grok_models.py
ADDED
|
@@ -0,0 +1,163 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Grok model configs and enum definitions"""
|
| 2 |
+
|
| 3 |
+
from enum import Enum
|
| 4 |
+
from typing import Dict, Any, Tuple
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
# Model configuration.
# Each entry maps a public model name to:
#   grok_model:        (internal Grok model name, request mode constant)
#   rate_limit_model:  name used when checking/recording rate limits
#   cost:              billing classification surfaced to clients
#   requires_super:    True when only Super tokens may use the model
#   display_name / description: human-facing metadata
#   default_* / supported_max_output_tokens: sampling defaults and ceiling
#   is_video_model:    optional flag for image/video generation models
_MODEL_CONFIG: Dict[str, Dict[str, Any]] = {
    "grok-3-fast": {
        "grok_model": ("grok-3", "MODEL_MODE_FAST"),
        "rate_limit_model": "grok-3",
        "cost": {"type": "low_cost", "multiplier": 1, "description": "Counts as 1 call"},
        "requires_super": False,
        "display_name": "Grok 3 Fast",
        "description": "Fast and efficient Grok 3 model",
        "raw_model_path": "xai/grok-3",
        "default_temperature": 1.0,
        "default_max_output_tokens": 8192,
        "supported_max_output_tokens": 131072,
        "default_top_p": 0.95
    },
    "grok-4-fast": {
        "grok_model": ("grok-4-mini-thinking-tahoe", "MODEL_MODE_GROK_4_MINI_THINKING"),
        "rate_limit_model": "grok-4-mini-thinking-tahoe",
        "cost": {"type": "low_cost", "multiplier": 1, "description": "Counts as 1 call"},
        "requires_super": False,
        "display_name": "Grok 4 Fast",
        "description": "Fast version of Grok 4 with mini thinking capabilities",
        "raw_model_path": "xai/grok-4-mini-thinking-tahoe",
        "default_temperature": 1.0,
        "default_max_output_tokens": 8192,
        "supported_max_output_tokens": 131072,
        "default_top_p": 0.95
    },
    "grok-4-fast-expert": {
        "grok_model": ("grok-4-mini-thinking-tahoe", "MODEL_MODE_EXPERT"),
        "rate_limit_model": "grok-4-mini-thinking-tahoe",
        "cost": {"type": "high_cost", "multiplier": 4, "description": "Counts as 4 calls"},
        "requires_super": False,
        "display_name": "Grok 4 Fast Expert",
        "description": "Expert mode of Grok 4 Fast with enhanced reasoning",
        "raw_model_path": "xai/grok-4-mini-thinking-tahoe",
        "default_temperature": 1.0,
        "default_max_output_tokens": 32768,
        "supported_max_output_tokens": 131072,
        "default_top_p": 0.95
    },
    "grok-4-expert": {
        "grok_model": ("grok-4", "MODEL_MODE_EXPERT"),
        "rate_limit_model": "grok-4",
        "cost": {"type": "high_cost", "multiplier": 4, "description": "Counts as 4 calls"},
        "requires_super": False,
        "display_name": "Grok 4 Expert",
        "description": "Full Grok 4 model with expert mode capabilities",
        "raw_model_path": "xai/grok-4",
        "default_temperature": 1.0,
        "default_max_output_tokens": 32768,
        "supported_max_output_tokens": 131072,
        "default_top_p": 0.95
    },
    "grok-4-heavy": {
        "grok_model": ("grok-4-heavy", "MODEL_MODE_HEAVY"),
        "rate_limit_model": "grok-4-heavy",
        "cost": {"type": "independent", "multiplier": 1, "description": "Billed independently, Super users only"},
        "requires_super": True,
        "display_name": "Grok 4 Heavy",
        "description": "Most powerful Grok 4 model with heavy computational capabilities. Requires Super Token for access.",
        "raw_model_path": "xai/grok-4-heavy",
        "default_temperature": 1.0,
        "default_max_output_tokens": 65536,
        "supported_max_output_tokens": 131072,
        "default_top_p": 0.95
    },
    "grok-4.1": {
        "grok_model": ("grok-4-1-non-thinking-w-tool", "MODEL_MODE_GROK_4_1"),
        "rate_limit_model": "grok-4-1-non-thinking-w-tool",
        "cost": {"type": "low_cost", "multiplier": 1, "description": "Counts as 1 call"},
        "requires_super": False,
        "display_name": "Grok 4.1",
        "description": "Latest Grok 4.1 model with tool capabilities",
        "raw_model_path": "xai/grok-4-1-non-thinking-w-tool",
        "default_temperature": 1.0,
        "default_max_output_tokens": 8192,
        "supported_max_output_tokens": 131072,
        "default_top_p": 0.95
    },
    "grok-4.1-thinking": {
        "grok_model": ("grok-4-1-thinking-1108b", "MODEL_MODE_AUTO"),
        # NOTE(review): cost type is "high_cost" but multiplier is 1 ("Counts
        # as 1 call") -- unlike other high_cost entries; confirm intended.
        "rate_limit_model": "grok-4-1-thinking-1108b",
        "cost": {"type": "high_cost", "multiplier": 1, "description": "Counts as 1 call"},
        "requires_super": False,
        "display_name": "Grok 4.1 Thinking",
        "description": "Grok 4.1 model with advanced thinking and tool capabilities",
        "raw_model_path": "xai/grok-4-1-thinking-1108b",
        "default_temperature": 1.0,
        "default_max_output_tokens": 32768,
        "supported_max_output_tokens": 131072,
        "default_top_p": 0.95
    },
    "grok-imagine-0.9": {
        # Imagine is routed through grok-3 for chat/limits but exposed with
        # its own public name and raw model path.
        "grok_model": ("grok-3", "MODEL_MODE_FAST"),
        "rate_limit_model": "grok-3",
        "cost": {"type": "low_cost", "multiplier": 1, "description": "Counts as 1 call"},
        "requires_super": False,
        "display_name": "Grok Imagine 0.9",
        "description": "Image and video generation model. Supports text-to-image and image-to-video generation.",
        "raw_model_path": "xai/grok-imagine-0.9",
        "default_temperature": 1.0,
        "default_max_output_tokens": 8192,
        "supported_max_output_tokens": 131072,
        "default_top_p": 0.95,
        "is_video_model": True
    }
}
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
class TokenType(Enum):
    """Token tier; values are the top-level bucket keys in token storage."""
    # NOTE(review): storage defaults elsewhere seed a "sso" bucket while this
    # enum uses "ssoNormal" -- confirm which key the token store actually uses.
    NORMAL = "ssoNormal"
    SUPER = "ssoSuper"
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
class Models(Enum):
    """Public model names exposed by the API, plus lookups into _MODEL_CONFIG."""

    GROK_3_FAST = "grok-3-fast"
    GROK_4_1 = "grok-4.1"
    GROK_4_1_THINKING = "grok-4.1-thinking"
    GROK_4_FAST = "grok-4-fast"
    GROK_4_FAST_EXPERT = "grok-4-fast-expert"
    GROK_4_EXPERT = "grok-4-expert"
    GROK_4_HEAVY = "grok-4-heavy"
    GROK_IMAGINE_0_9 = "grok-imagine-0.9"

    @classmethod
    def get_model_info(cls, model: str) -> Dict[str, Any]:
        """Return the configuration entry for *model*, or an empty dict."""
        return _MODEL_CONFIG.get(model, {})

    @classmethod
    def is_valid_model(cls, model: str) -> bool:
        """True when *model* has a configuration entry."""
        return model in _MODEL_CONFIG

    @classmethod
    def to_grok(cls, model: str) -> Tuple[str, str]:
        """Map a public model name to Grok's internal (name, mode) pair.

        Unknown models pass through unchanged, paired with the FAST mode.
        """
        entry = _MODEL_CONFIG.get(model)
        if not entry:
            return (model, "MODEL_MODE_FAST")
        return entry["grok_model"]

    @classmethod
    def to_rate_limit(cls, model: str) -> str:
        """Map a public model name to the name used for rate limiting.

        Unknown models pass through unchanged.
        """
        entry = _MODEL_CONFIG.get(model)
        if not entry:
            return model
        return entry["rate_limit_model"]

    @classmethod
    def get_all_model_names(cls) -> list[str]:
        """Return every configured public model name, in definition order."""
        return [name for name in _MODEL_CONFIG]
|
app/models/openai_schema.py
ADDED
|
@@ -0,0 +1,106 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""OpenAI request/response model definitions"""
|
| 2 |
+
|
| 3 |
+
from fastapi import HTTPException
|
| 4 |
+
from typing import Optional, List, Union, Dict, Any
|
| 5 |
+
from pydantic import BaseModel, Field, field_validator
|
| 6 |
+
|
| 7 |
+
from app.models.grok_models import Models
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class OpenAIChatRequest(BaseModel):
    """OpenAI-compatible chat completion request.

    Follows the standard OpenAI schema, extended with Grok-specific extras:
    ``image_url``/``image_urls`` attach images, ``x_statsig_id`` overrides
    the x-statsig-id header.
    """

    model: str = Field(..., description="Model name", min_length=1)
    messages: List[Dict[str, Any]] = Field(..., description="Message list", min_length=1)
    image_url: Optional[str] = Field(None, description="Single image URL")
    image_urls: Optional[List[str]] = Field(None, description="Image URL list")
    x_statsig_id: Optional[str] = Field(None, description="Override x-statsig-id")
    stream: bool = Field(False, description="Streaming response")
    temperature: Optional[float] = Field(0.7, ge=0, le=2, description="Sampling temperature")
    max_tokens: Optional[int] = Field(None, ge=1, le=100000, description="Max tokens")
    top_p: Optional[float] = Field(1.0, ge=0, le=1, description="Sampling parameter")

    # FIX: pydantic v2 requires @field_validator to be the OUTERMOST decorator,
    # with @classmethod applied beneath it. The original order (@classmethod
    # above @field_validator) wraps the validator proxy in a plain classmethod,
    # so pydantic never registers the validator and these checks silently
    # never run.
    @field_validator('messages')
    @classmethod
    def validate_messages(cls, v):
        """Validate message format.

        Each message must be a dict with 'role' and 'content', and the role
        must be one of system/user/assistant. Raises HTTPException(400)
        directly so clients get a 400 rather than a 422 validation response.
        """
        if not v:
            raise HTTPException(status_code=400, detail="Message list cannot be empty")

        for msg in v:
            if not isinstance(msg, dict):
                raise HTTPException(status_code=400, detail="Each message must be a dict")
            if 'role' not in msg:
                raise HTTPException(status_code=400, detail="Message missing 'role' field")
            if 'content' not in msg:
                raise HTTPException(status_code=400, detail="Message missing 'content' field")
            if msg['role'] not in ['system', 'user', 'assistant']:
                raise HTTPException(
                    status_code=400,
                    detail=f"Invalid role '{msg['role']}', must be system/user/assistant"
                )

        return v

    @field_validator('model')
    @classmethod
    def validate_model(cls, v):
        """Reject model names not present in the supported-model table."""
        if not Models.is_valid_model(v):
            supported = Models.get_all_model_names()
            raise HTTPException(
                status_code=400,
                detail=f"Unsupported model '{v}', supported: {', '.join(supported)}"
            )
        return v
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
class OpenAIChatCompletionMessage(BaseModel):
    """A single message inside a non-streaming chat completion choice."""
    role: str = Field(..., description="Role")
    content: str = Field(..., description="Content")
    # Grok-specific extensions, absent from the vanilla OpenAI schema.
    reference_id: Optional[str] = Field(default=None, description="Reference ID")
    annotations: Optional[List[str]] = Field(default=None, description="Annotations")
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
class OpenAIChatCompletionChoice(BaseModel):
    """One completion alternative in a non-streaming response."""
    index: int = Field(..., description="Index")
    message: OpenAIChatCompletionMessage = Field(..., description="Message")
    logprobs: Optional[float] = Field(default=None, description="Log probability")
    # Defaults to "stop" rather than Optional, unlike the streaming choice.
    finish_reason: str = Field(default="stop", description="Finish reason")
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
class OpenAIChatCompletionResponse(BaseModel):
    """Top-level non-streaming chat completion response (OpenAI-compatible)."""
    id: str = Field(..., description="Response ID")
    object: str = Field("chat.completion", description="Object type")
    created: int = Field(..., description="Created timestamp")  # Unix seconds per OpenAI convention
    model: str = Field(..., description="Model")
    choices: List[OpenAIChatCompletionChoice] = Field(..., description="Choices")
    # Free-form dict (prompt_tokens / completion_tokens / total_tokens expected
    # by OpenAI clients); optional because upstream may not report usage.
    usage: Optional[Dict[str, Any]] = Field(None, description="Token usage")
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
class OpenAIChatCompletionChunkMessage(BaseModel):
    """Delta payload carried by one streaming (SSE) chunk."""
    role: str = Field(..., description="Role")
    content: str = Field(..., description="Content")
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
class OpenAIChatCompletionChunkChoice(BaseModel):
    """One choice inside a streaming chunk response."""
    index: int = Field(..., description="Index")
    # Accepts either a raw dict or a typed chunk message; None signals the
    # terminal chunk where only finish_reason is meaningful.
    delta: Optional[Union[Dict[str, Any], OpenAIChatCompletionChunkMessage]] = Field(
        None, description="Delta data"
    )
    finish_reason: Optional[str] = Field(None, description="Finish reason")
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
class OpenAIChatCompletionChunkResponse(BaseModel):
    """Top-level streaming chat response chunk (OpenAI-compatible)."""
    id: str = Field(..., description="Response ID")
    object: str = Field(default="chat.completion.chunk", description="Object type")
    created: int = Field(..., description="Created timestamp")  # Unix seconds
    model: str = Field(..., description="Model")
    system_fingerprint: Optional[str] = Field(default=None, description="System fingerprint")
    choices: List[OpenAIChatCompletionChunkChoice] = Field(..., description="Choices")
|
app/services/api_keys.py
ADDED
|
@@ -0,0 +1,226 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""API key manager - multi-user key management"""
|
| 2 |
+
|
| 3 |
+
import os
|
| 4 |
+
import orjson
|
| 5 |
+
import time
|
| 6 |
+
import secrets
|
| 7 |
+
import asyncio
|
| 8 |
+
from typing import List, Dict, Optional
|
| 9 |
+
from pathlib import Path
|
| 10 |
+
|
| 11 |
+
from app.core.logger import logger
|
| 12 |
+
from app.core.config import setting
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
class ApiKeyManager:
    """API key management service.

    Singleton that owns the list of user-issued ``sk-`` keys, persisting
    them either through an injected storage backend (``set_storage``) or as
    a local ``api_keys.json`` file. All mutating operations rewrite the
    full key list on save.
    """

    # Singleton instance cache used by __new__.
    _instance = None

    def __new__(cls):
        # Classic singleton: always hand back the same instance.
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        return cls._instance

    def __init__(self):
        # __init__ runs on every ApiKeyManager() call even for the shared
        # instance, so guard against re-initialization.
        if hasattr(self, '_initialized'):
            return

        self.file_path = self._resolve_data_dir() / "api_keys.json"
        self._keys: List[Dict] = []        # in-memory key records
        self._lock = asyncio.Lock()        # serializes file reads/writes
        self._loaded = False               # True once keys were loaded (or load failed)
        self._storage = None               # optional external storage backend

        self._initialized = True
        logger.debug(f"[ApiKey] Initialized: {self.file_path}")

    @staticmethod
    def _resolve_data_dir() -> Path:
        """Resolve data directory for persistence.

        Priority: DATA_DIR env var > /data (container volume) > repo-local
        ``data`` directory two levels above this module.
        """
        data_dir_env = os.getenv("DATA_DIR")
        if data_dir_env:
            return Path(data_dir_env)
        if Path("/data").exists():
            return Path("/data")
        return Path(__file__).parents[2] / "data"

    def set_storage(self, storage) -> None:
        """Set storage instance; also redirects file_path into its data_dir."""
        self._storage = storage
        data_dir = getattr(storage, "data_dir", None)
        if data_dir:
            self.file_path = Path(data_dir) / "api_keys.json"

    def _use_storage(self) -> bool:
        # Storage backend is used only when it exposes both load/save hooks.
        return bool(self._storage and hasattr(self._storage, "load_api_keys") and hasattr(self._storage, "save_api_keys"))

    async def init(self):
        """Initialize and load data (idempotent)."""
        if not self._loaded:
            await self._load_data()

    async def _load_data(self):
        """Load API keys from the storage backend or the JSON file."""
        if self._loaded:
            return

        try:
            if self._use_storage():
                self._keys = await self._storage.load_api_keys()
                self._loaded = True
                logger.debug(f"[ApiKey] Loaded {len(self._keys)} API keys (storage)")
                return

            if not self.file_path.exists():
                # No file yet: start with an empty key list.
                self._keys = []
                self._loaded = True
                return

            async with self._lock:
                content = await asyncio.to_thread(self.file_path.read_bytes)
                if content:
                    self._keys = orjson.loads(content)
                self._loaded = True
                logger.debug(f"[ApiKey] Loaded {len(self._keys)} API keys")
        except Exception as e:
            logger.error(f"[ApiKey] Load failed: {e}")
            self._keys = []
            self._loaded = True  # Prevent overwrite if load fails

    async def _save_data(self):
        """Save API keys to the storage backend or the JSON file.

        NOTE(review): list mutations happen outside self._lock (only file
        I/O is locked); fine for single-event-loop asyncio, but confirm no
        multi-worker deployment shares this file.
        """
        if not self._loaded:
            logger.warning("[ApiKey] Save skipped because data is not loaded to avoid overwrite")
            return

        try:
            if self._use_storage():
                await self._storage.save_api_keys(self._keys)
                return

            # Ensure directory exists
            self.file_path.parent.mkdir(parents=True, exist_ok=True)

            async with self._lock:
                content = orjson.dumps(self._keys, option=orjson.OPT_INDENT_2)
                await asyncio.to_thread(self.file_path.write_bytes, content)
        except Exception as e:
            logger.error(f"[ApiKey] Save failed: {e}")

    def generate_key(self) -> str:
        """Generate a new sk- prefixed key (cryptographically random)."""
        return f"sk-{secrets.token_urlsafe(24)}"

    async def add_key(self, name: str) -> Dict:
        """Add a single API key and persist; returns the new record."""
        new_key = {
            "key": self.generate_key(),
            "name": name,
            "created_at": int(time.time()),
            "is_active": True
        }
        self._keys.append(new_key)
        await self._save_data()
        logger.info(f"[ApiKey] Added new key: {name}")
        return new_key

    async def batch_add_keys(self, name_prefix: str, count: int) -> List[Dict]:
        """Batch add API keys named '<prefix>-1'..'<prefix>-N' (plain prefix when count == 1)."""
        new_keys = []
        for i in range(1, count + 1):
            name = f"{name_prefix}-{i}" if count > 1 else name_prefix
            new_keys.append({
                "key": self.generate_key(),
                "name": name,
                "created_at": int(time.time()),
                "is_active": True
            })

        self._keys.extend(new_keys)
        await self._save_data()
        logger.info(f"[ApiKey] Batch added {count} keys, prefix: {name_prefix}")
        return new_keys

    async def delete_key(self, key: str) -> bool:
        """Delete one API key; returns True when a record was removed."""
        initial_len = len(self._keys)
        self._keys = [k for k in self._keys if k["key"] != key]

        if len(self._keys) != initial_len:
            await self._save_data()
            logger.info(f"[ApiKey] Deleted key: {key[:10]}...")  # log only a prefix
            return True
        return False

    async def batch_delete_keys(self, keys: List[str]) -> int:
        """Batch delete API keys; returns how many were removed."""
        initial_len = len(self._keys)
        self._keys = [k for k in self._keys if k["key"] not in keys]

        deleted_count = initial_len - len(self._keys)
        if deleted_count > 0:
            await self._save_data()
            logger.info(f"[ApiKey] Batch deleted {deleted_count} keys")
        return deleted_count

    async def update_key_status(self, key: str, is_active: bool) -> bool:
        """Enable/disable one key; returns False when the key is unknown."""
        for k in self._keys:
            if k["key"] == key:
                k["is_active"] = is_active
                await self._save_data()
                return True
        return False

    async def batch_update_keys_status(self, keys: List[str], is_active: bool) -> int:
        """Batch enable/disable keys; counts only records that actually changed."""
        updated_count = 0
        for k in self._keys:
            if k["key"] in keys:
                if k["is_active"] != is_active:
                    k["is_active"] = is_active
                    updated_count += 1

        if updated_count > 0:
            await self._save_data()
            logger.info(f"[ApiKey] Batch updated {updated_count} keys to: {is_active}")
        return updated_count

    async def update_key_name(self, key: str, name: str) -> bool:
        """Rename one key's note; returns False when the key is unknown."""
        for k in self._keys:
            if k["key"] == key:
                k["name"] = name
                await self._save_data()
                return True
        return False

    def validate_key(self, key: str) -> Optional[Dict]:
        """Validate key and return key info, or None when invalid/inactive."""
        # 1. Check global config key (default admin key)
        global_key = setting.grok_config.get("api_key")
        if global_key and key == global_key:
            return {
                "key": global_key,
                "name": "Default admin",
                "is_active": True,
                "is_admin": True
            }

        # 2. Check multi-key list
        for k in self._keys:
            if k["key"] == key:
                if k["is_active"]:
                    return {**k, "is_admin": False}  # Keys are not treated as admins for now
                # Found but disabled: reject.
                return None

        return None

    def get_all_keys(self) -> List[Dict]:
        """Get all keys (live list reference, not a copy)."""
        return self._keys
|
| 223 |
+
|
| 224 |
+
|
| 225 |
+
# Global instance
|
| 226 |
+
api_key_manager = ApiKeyManager()
|
app/services/grok/cache.py
ADDED
|
@@ -0,0 +1,243 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Cache service module - download, cache, and clean images and videos"""
|
| 2 |
+
|
| 3 |
+
import asyncio
|
| 4 |
+
import base64
|
| 5 |
+
from pathlib import Path
|
| 6 |
+
from typing import Optional, Tuple
|
| 7 |
+
from curl_cffi.requests import AsyncSession
|
| 8 |
+
|
| 9 |
+
from app.core.config import setting
|
| 10 |
+
from app.core.logger import logger
|
| 11 |
+
from app.services.grok.statsig import get_dynamic_headers
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
# Constants
# Lowercase file suffix -> MIME type, used when building data: URIs.
MIME_TYPES = {
    '.jpg': 'image/jpeg', '.jpeg': 'image/jpeg', '.png': 'image/png',
    '.gif': 'image/gif', '.webp': 'image/webp', '.bmp': 'image/bmp',
}
DEFAULT_MIME = 'image/jpeg'  # fallback when suffix is not in MIME_TYPES
ASSETS_URL = "https://assets.grok.com"  # base URL for Grok-hosted assets
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
class CacheService:
    """Cache service base class.

    Downloads assets from assets.grok.com into ``data/temp/<cache_type>/``,
    with two nested retry layers (inner: 403; outer: configurable status
    codes) and a size-bounded background cleanup.
    """

    def __init__(self, cache_type: str, timeout: float = 30.0):
        self.cache_type = cache_type
        self.cache_dir = Path(f"data/temp/{cache_type}")
        self.cache_dir.mkdir(parents=True, exist_ok=True)
        self.timeout = timeout
        # Prevents overlapping cleanup passes (see cleanup()).
        self._cleanup_lock = asyncio.Lock()

    def _get_path(self, file_path: str) -> Path:
        """Convert an asset path to a flat cache filename ('/' -> '-')."""
        return self.cache_dir / file_path.lstrip('/').replace('/', '-')

    def _log(self, level: str, msg: str):
        """Unified log output, tagged with the cache type."""
        getattr(logger, level)(f"[{self.cache_type.upper()}Cache] {msg}")

    def _build_headers(self, file_path: str, auth_token: str) -> dict:
        """Build browser-like request headers, appending cf_clearance when configured."""
        cf = setting.grok_config.get("cf_clearance", "")
        return {
            **get_dynamic_headers(pathname=file_path),
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8",
            "Sec-Fetch-Dest": "document",
            "Sec-Fetch-Mode": "navigate",
            "Sec-Fetch-Site": "same-site",
            "Sec-Fetch-User": "?1",
            "Upgrade-Insecure-Requests": "1",
            "Referer": "https://grok.com/",
            "Cookie": f"{auth_token};{cf}" if cf else auth_token
        }

    async def download(self, file_path: str, auth_token: str, timeout: Optional[float] = None) -> Optional[Path]:
        """Download and cache a file; returns the cache path or None on failure.

        Retry structure:
        - inner loop: up to 5 retries on HTTP 403 (0.5s pause);
        - outer loop: retries on status codes from ``retry_status_codes``
          config with progressive 0.1s/0.2s/0.3s delays.
        """
        cache_path = self._get_path(file_path)
        if cache_path.exists():
            self._log("debug", "File already cached")
            return cache_path

        # Outer retry: configurable status codes (401/429, etc)
        retry_codes = setting.grok_config.get("retry_status_codes", [401, 429])
        MAX_OUTER_RETRY = 3

        # NOTE(review): range(MAX_OUTER_RETRY + 1) gives 4 total attempts
        # (1 initial + 3 retries) on status-code failures — confirm intended.
        for outer_retry in range(MAX_OUTER_RETRY + 1):  # +1 ensures 3 retries
            try:
                # Inner retry: 403 retry (cache uses cache proxy only, no pool)
                max_403_retries = 5
                retry_403_count = 0

                while retry_403_count <= max_403_retries:
                    proxy = await setting.get_proxy_async("cache")
                    proxies = {"http": proxy, "https": proxy} if proxy else {}

                    # Only log on the very first attempt to avoid log spam.
                    if proxy and outer_retry == 0 and retry_403_count == 0:
                        self._log("debug", f"Using proxy: {proxy.split('@')[-1] if '@' in proxy else proxy}")

                    async with AsyncSession() as session:
                        url = f"{ASSETS_URL}{file_path}"
                        if outer_retry == 0 and retry_403_count == 0:
                            self._log("debug", f"Downloading: {url}")

                        response = await session.get(
                            url,
                            headers=self._build_headers(file_path, auth_token),
                            proxies=proxies,
                            timeout=timeout or self.timeout,
                            allow_redirects=True,
                            impersonate="chrome133a"
                        )

                        # Check 403 error - inner retry (cache does not use proxy pool)
                        if response.status_code == 403:
                            retry_403_count += 1

                            if retry_403_count <= max_403_retries:
                                self._log("warning", f"403 error, retrying ({retry_403_count}/{max_403_retries})...")
                                await asyncio.sleep(0.5)
                                continue

                            self._log("error", f"403 error, retried {retry_403_count-1} times, giving up")
                            return None

                        # Check retryable status codes - outer retry
                        if response.status_code in retry_codes:
                            if outer_retry < MAX_OUTER_RETRY:
                                delay = (outer_retry + 1) * 0.1  # Progressive delay: 0.1s, 0.2s, 0.3s
                                self._log("warning", f"{response.status_code} error, outer retry ({outer_retry+1}/{MAX_OUTER_RETRY}), waiting {delay}s...")
                                await asyncio.sleep(delay)
                                break  # Exit inner loop, retry outer
                            else:
                                self._log("error", f"{response.status_code} error, retried {outer_retry} times, giving up")
                                return None

                        response.raise_for_status()
                        # Write off the event loop thread.
                        await asyncio.to_thread(cache_path.write_bytes, response.content)

                        if outer_retry > 0 or retry_403_count > 0:
                            self._log("info", "Retry succeeded")
                        else:
                            self._log("debug", "Cached successfully")

                        # Async cleanup (with error handling)
                        # NOTE(review): task reference is not retained; a
                        # not-yet-started cleanup task could be GC'd — consider
                        # keeping a reference if cleanups appear to be skipped.
                        asyncio.create_task(self._safe_cleanup())
                        return cache_path

            except Exception as e:
                # NOTE(review): this path allows one fewer retry than the
                # status-code path (< MAX_OUTER_RETRY - 1 vs < MAX_OUTER_RETRY)
                # — confirm the asymmetry is intended.
                if outer_retry < MAX_OUTER_RETRY - 1:
                    self._log("warning", f"Download error: {e}, outer retry ({outer_retry+1}/{MAX_OUTER_RETRY})...")
                    await asyncio.sleep(0.5)
                    continue

                self._log("error", f"Download failed: {e} (retried {outer_retry} times)")
                return None

        return None

    def get_cached(self, file_path: str) -> Optional[Path]:
        """Get the cached file path, or None when not cached."""
        path = self._get_path(file_path)
        return path if path.exists() else None

    async def _safe_cleanup(self):
        """Safe cleanup wrapper for background tasks (catches all exceptions)."""
        try:
            await self.cleanup()
        except Exception as e:
            self._log("error", f"Background cleanup failed: {e}")

    async def cleanup(self):
        """Evict oldest files (by mtime) until total size fits the configured limit."""
        # Best-effort skip when another cleanup is already running.
        if self._cleanup_lock.locked():
            return

        async with self._cleanup_lock:
            try:
                max_mb = setting.global_config.get(f"{self.cache_type}_cache_max_size_mb", 500)
                max_bytes = max_mb * 1024 * 1024

                # Get file info (path, size, mtime); stat() called once per file.
                files = [(f, (s := f.stat()).st_size, s.st_mtime)
                         for f in self.cache_dir.glob("*") if f.is_file()]
                total = sum(size for _, size, _ in files)

                if total <= max_bytes:
                    return

                self._log("info", f"Cleaning cache {total/1024/1024:.1f}MB -> {max_mb}MB")

                # Delete oldest files first (sorted by mtime ascending).
                for path, size, _ in sorted(files, key=lambda x: x[2]):
                    if total <= max_bytes:
                        break
                    await asyncio.to_thread(path.unlink)
                    total -= size

                self._log("info", f"Cleanup complete: {total/1024/1024:.1f}MB")
            except Exception as e:
                self._log("error", f"Cleanup failed: {e}")
|
| 182 |
+
|
| 183 |
+
|
| 184 |
+
class ImageCache(CacheService):
    """Cache service specialized for image assets (30s download timeout)."""

    def __init__(self):
        super().__init__("image", timeout=30.0)

    async def download_image(self, path: str, token: str) -> Optional[Path]:
        """Download an image asset; returns its cache path or None."""
        return await self.download(path, token)

    @staticmethod
    def to_base64(image_path: Path) -> Optional[str]:
        """Encode a cached image as a data: URI string, or None on failure."""
        try:
            if not image_path.exists():
                logger.error(f"[ImageCache] File not found: {image_path}")
                return None

            # MIME type from the suffix, encoded payload from the raw bytes.
            mime_type = MIME_TYPES.get(image_path.suffix.lower(), DEFAULT_MIME)
            encoded = base64.b64encode(image_path.read_bytes()).decode()
            return f"data:{mime_type};base64,{encoded}"
        except Exception as e:
            logger.error(f"[ImageCache] Conversion failed: {e}")
            return None

    async def download_base64(self, path: str, token: str) -> Optional[str]:
        """Download an image and return it as a data: URI, deleting the temp file.

        Returns None when the download or the conversion fails.
        """
        try:
            local_path = await self.download(path, token)
            if not local_path:
                return None

            data_uri = self.to_base64(local_path)

            # Best-effort removal of the on-disk copy; failure is non-fatal.
            try:
                local_path.unlink()
            except Exception as e:
                logger.warning(f"[ImageCache] Failed to delete temp file: {e}")

            return data_uri
        except Exception as e:
            logger.error(f"[ImageCache] Base64 download failed: {e}")
            return None
|
| 228 |
+
|
| 229 |
+
|
| 230 |
+
class VideoCache(CacheService):
    """Cache service specialized for video assets.

    Videos are larger than images, so the download timeout is doubled to 60s.
    """

    def __init__(self):
        super().__init__("video", timeout=60.0)

    async def download_video(self, path: str, token: str) -> Optional[Path]:
        """Download a video asset; returns its cache path or None."""
        return await self.download(path, token)
|
| 239 |
+
|
| 240 |
+
|
| 241 |
+
# Global instance (module-level singletons shared by the image/video routes)
image_cache_service = ImageCache()
video_cache_service = VideoCache()
|
app/services/grok/client.py
ADDED
|
@@ -0,0 +1,386 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Grok API client - convert OpenAI requests and process responses"""
|
| 2 |
+
|
| 3 |
+
import asyncio
|
| 4 |
+
import orjson
|
| 5 |
+
from typing import Dict, List, Tuple, Any, Optional
|
| 6 |
+
from curl_cffi.requests import AsyncSession as curl_AsyncSession
|
| 7 |
+
|
| 8 |
+
from app.core.config import setting
|
| 9 |
+
from app.core.logger import logger
|
| 10 |
+
from app.models.grok_models import Models
|
| 11 |
+
from app.services.grok.processer import GrokResponseProcessor
|
| 12 |
+
from app.services.images.normalize import normalize_messages
|
| 13 |
+
from app.services.grok.statsig import get_dynamic_headers
|
| 14 |
+
from app.services.grok.token import token_manager
|
| 15 |
+
from app.services.grok.upload import ImageUploadManager
|
| 16 |
+
from app.services.grok.create import PostCreateManager
|
| 17 |
+
from app.core.exception import GrokApiException
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
# Constants
API_ENDPOINT = "https://grok.com/rest/app-chat/conversations/new"  # chat-creation endpoint
TIMEOUT = 120  # request timeout, seconds
BROWSER = "chrome133a"  # curl_cffi TLS impersonation profile
MAX_RETRY = 3  # attempts made by GrokClient._retry
MAX_UPLOADS = 20  # Increase upload concurrency for higher throughput
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
class GrokClient:
|
| 29 |
+
"""Grok API client"""
|
| 30 |
+
|
| 31 |
+
_upload_sem = None # Lazy init
|
| 32 |
+
|
| 33 |
+
@staticmethod
|
| 34 |
+
def _get_upload_semaphore():
|
| 35 |
+
"""Get upload semaphore (dynamic config)"""
|
| 36 |
+
if GrokClient._upload_sem is None:
|
| 37 |
+
# Read from config or use default
|
| 38 |
+
max_concurrency = setting.global_config.get("max_upload_concurrency", MAX_UPLOADS)
|
| 39 |
+
GrokClient._upload_sem = asyncio.Semaphore(max_concurrency)
|
| 40 |
+
logger.debug(f"[Client] Initialized upload concurrency: {max_concurrency}")
|
| 41 |
+
return GrokClient._upload_sem
|
| 42 |
+
|
| 43 |
+
    @staticmethod
    async def openai_to_grok(request: dict):
        """Convert an OpenAI-format request dict into a Grok request and send it.

        Pipeline: normalize messages (merging image_url/image_urls) ->
        extract text + images -> resolve the Grok model/mode -> delegate to
        _retry for upload + dispatch.

        Raises:
            GrokApiException(MODEL_MISMATCH): images supplied for a model
                other than grok-imagine-0.9.
        """
        model = request["model"]
        normalized_messages = normalize_messages(
            request.get("messages", []),
            image_url=request.get("image_url"),
            image_urls=request.get("image_urls"),
        )
        content, images = GrokClient._extract_content(normalized_messages)
        stream = request.get("stream", False)
        statsig_id = request.get("x_statsig_id") or ""

        # Get model info
        info = Models.get_model_info(model)
        grok_model, mode = Models.to_grok(model)
        is_video = info.get("is_video_model", False)
        logger.debug(f"[Client] Model selected: {model} -> {grok_model} ({mode})")

        # Only the image model accepts image attachments.
        if images and model != "grok-imagine-0.9":
            raise GrokApiException(
                "Images require model grok-imagine-0.9",
                "MODEL_MISMATCH",
                {"model": model}
            )

        if images:
            logger.debug("[Client] Image mode enabled")

        # Video model limitation
        if is_video and len(images) > 1:
            logger.warning("[Client] Video model supports only 1 image; using the first one")
            images = images[:1]

        return await GrokClient._retry(model, content, images, grok_model, mode, is_video, stream, statsig_id)
|
| 78 |
+
|
| 79 |
+
    @staticmethod
    async def _retry(model: str, content: str, images: List[str], grok_model: str, mode: str, is_video: bool, stream: bool, statsig_id: str):
        """Run the full request flow with up to MAX_RETRY attempts.

        Each attempt re-acquires a token and re-uploads images, so a retry
        can succeed with a different token. Only GrokApiExceptions with
        error_code HTTP_ERROR/NO_AVAILABLE_TOKEN AND a status in the
        configured retry_status_codes are retried; everything else is
        re-raised immediately.
        """
        last_err = None

        for i in range(MAX_RETRY):
            try:
                token = await token_manager.get_token(model)
                img_ids, img_uris = await GrokClient._upload(images, token, statsig_id)

                # Create session for video model
                post_id = None
                if is_video and img_ids and img_uris:
                    post_id = await GrokClient._create_post(img_ids[0], img_uris[0], token, statsig_id)

                payload = GrokClient._build_payload(content, grok_model, mode, img_ids, img_uris, is_video, post_id)
                return await GrokClient._request(payload, token, model, stream, post_id, statsig_id)

            except GrokApiException as e:
                last_err = e
                # Check if retryable
                if e.error_code not in ["HTTP_ERROR", "NO_AVAILABLE_TOKEN"]:
                    raise

                status = e.context.get("status") if e.context else None
                retry_codes = setting.grok_config.get("retry_status_codes", [401, 429])

                # NOTE(review): a NO_AVAILABLE_TOKEN error without a 'status'
                # in its context yields status=None, which is not in
                # retry_codes and therefore re-raises — confirm intended.
                if status not in retry_codes:
                    raise

                if i < MAX_RETRY - 1:
                    logger.warning(f"[Client] Failed (status: {status}), retry {i+1}/{MAX_RETRY}")
                    await asyncio.sleep(0.5)

        # All attempts exhausted: surface the last retryable error.
        raise last_err or GrokApiException("Request failed", "REQUEST_ERROR")
|
| 114 |
+
|
| 115 |
+
@staticmethod
|
| 116 |
+
def _extract_content(messages: List[Dict]) -> Tuple[str, List[str]]:
|
| 117 |
+
"""Extract text and images, preserving role structure"""
|
| 118 |
+
formatted_messages = []
|
| 119 |
+
images = []
|
| 120 |
+
|
| 121 |
+
# Role mapping
|
| 122 |
+
role_map = {
|
| 123 |
+
"system": "System",
|
| 124 |
+
"user": "User",
|
| 125 |
+
"assistant": "grok"
|
| 126 |
+
}
|
| 127 |
+
|
| 128 |
+
for msg in messages:
|
| 129 |
+
role = msg.get("role", "user")
|
| 130 |
+
content = msg.get("content", "")
|
| 131 |
+
role_prefix = role_map.get(role, role)
|
| 132 |
+
|
| 133 |
+
# Extract text content
|
| 134 |
+
text_parts = []
|
| 135 |
+
if isinstance(content, list):
|
| 136 |
+
for item in content:
|
| 137 |
+
if item.get("type") == "text":
|
| 138 |
+
text_parts.append(item.get("text", ""))
|
| 139 |
+
elif item.get("type") == "image_url":
|
| 140 |
+
image_url = item.get("image_url")
|
| 141 |
+
if isinstance(image_url, dict):
|
| 142 |
+
url = image_url.get("url")
|
| 143 |
+
else:
|
| 144 |
+
url = image_url
|
| 145 |
+
if url:
|
| 146 |
+
images.append(url)
|
| 147 |
+
else:
|
| 148 |
+
text_parts.append(content)
|
| 149 |
+
|
| 150 |
+
# Merge text and add role prefix
|
| 151 |
+
msg_text = "".join(text_parts).strip()
|
| 152 |
+
if msg_text:
|
| 153 |
+
formatted_messages.append(f"{role_prefix}: {msg_text}")
|
| 154 |
+
|
| 155 |
+
# Join all messages with newlines
|
| 156 |
+
return "\n".join(formatted_messages), images
|
| 157 |
+
|
| 158 |
+
@staticmethod
async def _upload(urls: List[str], token: str, statsig_id: str = "") -> Tuple[List[str], List[str]]:
    """Upload all images in parallel, bounded by the shared upload semaphore.

    Returns parallel lists (file_ids, file_uris); entries whose upload
    returned a falsy ID are dropped. Raises GrokApiException on any
    failed upload.
    """
    if not urls:
        return [], []

    sem = GrokClient._get_upload_semaphore()

    async def bounded(u):
        # Cap concurrency so a burst of images doesn't flood the API
        async with sem:
            return await ImageUploadManager.upload(u, token, statsig_id)

    outcomes = await asyncio.gather(*(bounded(u) for u in urls), return_exceptions=True)

    file_ids, file_uris = [], []
    for u, out in zip(urls, outcomes):
        if isinstance(out, Exception):
            logger.warning(f"[Client] Upload failed: {u} - {out}")
            if isinstance(out, GrokApiException):
                raise out
            raise GrokApiException("Image upload failed", "IMAGE_UPLOAD_FAILED", {"url": u})
        if not (isinstance(out, tuple) and len(out) == 2):
            raise GrokApiException("Image upload failed", "IMAGE_UPLOAD_FAILED", {"url": u})
        fid, furi = out
        if fid:
            file_ids.append(fid)
            file_uris.append(furi)

    return file_ids, file_uris
@staticmethod
async def _create_post(file_id: str, file_uri: str, token: str, statsig_id: str = "") -> Optional[str]:
    """Best-effort creation of a video session; returns the post ID or None.

    Any failure (exception or unsuccessful result) is logged and swallowed
    so that video generation can still be attempted without a session.
    """
    try:
        resp = await PostCreateManager.create(file_id, file_uri, token, statsig_id)
        ok = bool(resp) and bool(resp.get("success"))
        return resp.get("post_id") if ok else None
    except Exception as e:
        logger.warning(f"[Client] Failed to create session: {e}")
        return None
@staticmethod
|
| 199 |
+
def _build_payload(content: str, model: str, mode: str, img_ids: List[str], img_uris: List[str], is_video: bool = False, post_id: str = None) -> Dict:
|
| 200 |
+
"""Build request payload"""
|
| 201 |
+
# Special handling for video models
|
| 202 |
+
if is_video and img_uris:
|
| 203 |
+
img_msg = f"https://grok.com/imagine/{post_id}" if post_id else f"https://assets.grok.com/post/{img_uris[0]}"
|
| 204 |
+
return {
|
| 205 |
+
"temporary": True,
|
| 206 |
+
"modelName": "grok-3",
|
| 207 |
+
"message": f"{img_msg} {content} --mode=custom",
|
| 208 |
+
"fileAttachments": img_ids,
|
| 209 |
+
"toolOverrides": {"videoGen": True}
|
| 210 |
+
}
|
| 211 |
+
|
| 212 |
+
# Standard payload
|
| 213 |
+
return {
|
| 214 |
+
"temporary": setting.grok_config.get("temporary", True),
|
| 215 |
+
"modelName": model,
|
| 216 |
+
"message": content,
|
| 217 |
+
"fileAttachments": img_ids,
|
| 218 |
+
"imageAttachments": [],
|
| 219 |
+
"disableSearch": False,
|
| 220 |
+
"enableImageGeneration": True,
|
| 221 |
+
"returnImageBytes": False,
|
| 222 |
+
"returnRawGrokInXaiRequest": False,
|
| 223 |
+
"enableImageStreaming": True,
|
| 224 |
+
"imageGenerationCount": 2,
|
| 225 |
+
"forceConcise": False,
|
| 226 |
+
"toolOverrides": {},
|
| 227 |
+
"enableSideBySide": True,
|
| 228 |
+
"sendFinalMetadata": True,
|
| 229 |
+
"isReasoning": False,
|
| 230 |
+
"webpageUrls": [],
|
| 231 |
+
"disableTextFollowUps": True,
|
| 232 |
+
"responseMetadata": {"requestModelDetails": {"modelId": model}},
|
| 233 |
+
"disableMemory": False,
|
| 234 |
+
"forceSideBySide": False,
|
| 235 |
+
"modelMode": mode,
|
| 236 |
+
"isAsyncChat": False
|
| 237 |
+
}
|
| 238 |
+
|
| 239 |
+
@staticmethod
async def _request(payload: dict, token: str, model: str, stream: bool, post_id: str = None, statsig_id: str = ""):
    """POST the chat payload to Grok and hand the response to the processor.

    Two nested retry layers:
      * outer: statuses listed in grok_config.retry_status_codes
        (default 401/429), up to MAX_OUTER_RETRY extra attempts;
      * inner: 403 responses, retried with a refreshed proxy, only when
        the proxy pool is enabled.

    Returns the processor result: an async generator for streaming
    requests, otherwise the fully-parsed completion response.

    Raises:
        GrokApiException: on missing token, non-retryable HTTP errors,
            network failures, or retry exhaustion.
    """
    if not token:
        raise GrokApiException("Missing authentication token", "NO_AUTH_TOKEN")

    # Outer retry: configurable status codes (401/429 by default)
    retry_codes = setting.grok_config.get("retry_status_codes", [401, 429])
    MAX_OUTER_RETRY = 3

    for outer_retry in range(MAX_OUTER_RETRY + 1):  # initial attempt + MAX_OUTER_RETRY retries
        # Inner retry: 403 responses handled via the proxy pool
        max_403_retries = 5
        retry_403_count = 0

        while retry_403_count <= max_403_retries:
            # Imported lazily — presumably to avoid a circular import at module load
            from app.core.proxy_pool import proxy_pool

            # On a 403 retry with the pool enabled, force a fresh proxy
            if retry_403_count > 0 and proxy_pool._enabled:
                logger.info(f"[Client] 403 retry {retry_403_count}/{max_403_retries}, refreshing proxy...")
                proxy = await proxy_pool.force_refresh()
            else:
                proxy = await setting.get_proxy_async("service")

            proxies = {"http": proxy, "https": proxy} if proxy else None

            # Headers are rebuilt on every attempt (dynamic statsig values)
            headers = GrokClient._build_headers(token, statsig_id)
            if model == "grok-imagine-0.9":
                # Imagine requests need a Referer pointing at the session/file
                file_attachments = payload.get("fileAttachments", [])
                ref_id = post_id or (file_attachments[0] if file_attachments else "")
                if ref_id:
                    headers["Referer"] = f"https://grok.com/imagine/{ref_id}"

            # One HTTP session per attempt; closed on every exit path below
            session = curl_AsyncSession(impersonate=BROWSER)
            try:
                response = await session.post(
                    API_ENDPOINT,
                    headers=headers,
                    data=orjson.dumps(payload),
                    timeout=TIMEOUT,
                    stream=True,
                    proxies=proxies
                )

                # Inner 403 retry: only meaningful when the proxy pool is on
                if response.status_code == 403 and proxy_pool._enabled:
                    retry_403_count += 1
                    if retry_403_count <= max_403_retries:
                        logger.warning(f"[Client] 403 error, retrying ({retry_403_count}/{max_403_retries})...")
                        await session.close()
                        await asyncio.sleep(0.5)
                        continue
                    # Exhausted: fall through to the generic error handling below
                    logger.error(f"[Client] 403 error, retried {retry_403_count-1} times, giving up")

                # Retryable status codes -> escalate to the outer loop
                if response.status_code in retry_codes:
                    if outer_retry < MAX_OUTER_RETRY:
                        delay = (outer_retry + 1) * 0.1  # progressive backoff: 0.1s, 0.2s, 0.3s
                        logger.warning(f"[Client] {response.status_code} error, outer retry ({outer_retry+1}/{MAX_OUTER_RETRY}), waiting {delay}s...")
                        await session.close()
                        await asyncio.sleep(delay)
                        break  # Exit inner loop for outer retry
                    else:
                        logger.error(f"[Client] {response.status_code} error, retried {outer_retry} times, giving up")
                        try:
                            GrokClient._handle_error(response, token)  # always raises
                        finally:
                            await session.close()

                # Any other non-200 status is terminal
                if response.status_code != 200:
                    try:
                        GrokClient._handle_error(response, token)  # always raises
                    finally:
                        await session.close()

                # Success - clear this token's failure counter in the background
                asyncio.create_task(token_manager.reset_failure(token))

                if outer_retry > 0 or retry_403_count > 0:
                    logger.info("[Client] Retry succeeded")

                # Process response
                if stream:
                    # Streaming: the processor/iterator owns and closes the session
                    result = GrokResponseProcessor.process_stream(response, token, session)
                else:
                    # Non-streaming: close the session once parsing is done
                    try:
                        result = await GrokResponseProcessor.process_normal(response, token, model)
                    finally:
                        await session.close()

                asyncio.create_task(GrokClient._update_limits(token, model))
                return result

            except Exception as e:
                await session.close()
                # curl_cffi raises RequestsError for transport failures;
                # matched by type name to avoid importing the exception class here
                if "RequestsError" in str(type(e)):
                    logger.error(f"[Client] Network error: {e}")
                    raise GrokApiException(f"Network error: {e}", "NETWORK_ERROR") from e
                raise

    raise GrokApiException("Request failed: maximum retries reached", "MAX_RETRIES_EXCEEDED")
@staticmethod
def _build_headers(token: str, statsig_id: str = "") -> Dict[str, str]:
    """Compose request headers for the chat endpoint.

    The Cookie carries the auth token, with the configured cf_clearance
    appended when present.
    """
    headers = get_dynamic_headers("/rest/app-chat/conversations/new", statsig_id=statsig_id)
    cf = setting.grok_config.get("cf_clearance", "")
    cookie = token if not cf else f"{token};{cf}"
    headers["Cookie"] = cookie
    return headers
@staticmethod
def _handle_error(response, token: str):
    """Record the failure against the token and raise a GrokApiException.

    403 is treated as a Cloudflare/IP block; other statuses carry the
    parsed JSON body (or truncated raw text) in the exception context.
    Failure bookkeeping is scheduled as background tasks so the caller
    isn't blocked on stats I/O.

    Raises:
        GrokApiException: always, with error code "HTTP_ERROR".
    """
    if response.status_code == 403:
        msg = "Your IP was blocked. Try: 1) change IP 2) use proxy 3) set CF clearance"
        data = {"cf_blocked": True, "status": 403}
        logger.warning(f"[Client] {msg}")
    else:
        try:
            data = response.json()
            msg = str(data)
        except Exception:  # was a bare except; body is not valid JSON -> fall back to raw text
            data = response.text
            msg = data[:200] if data else "Unknown error"

    asyncio.create_task(token_manager.record_failure(token, response.status_code, msg))
    asyncio.create_task(token_manager.apply_cooldown(token, response.status_code))
    raise GrokApiException(
        f"Request failed: {response.status_code} - {msg}",
        "HTTP_ERROR",
        {"status": response.status_code, "data": data}
    )
@staticmethod
async def _update_limits(token: str, model: str):
    """Refresh the token's rate-limit info; errors are logged, never raised."""
    try:
        await token_manager.check_limits(token, model)
    except Exception as exc:
        logger.error(f"[Client] Failed to update limits: {exc}")
app/services/grok/create.py
ADDED
|
@@ -0,0 +1,140 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Post creation manager - create session before video generation"""
|
| 2 |
+
|
| 3 |
+
import asyncio
|
| 4 |
+
import orjson
|
| 5 |
+
from typing import Dict, Any, Optional
|
| 6 |
+
from curl_cffi.requests import AsyncSession
|
| 7 |
+
|
| 8 |
+
from app.services.grok.statsig import get_dynamic_headers
|
| 9 |
+
from app.core.exception import GrokApiException
|
| 10 |
+
from app.core.config import setting
|
| 11 |
+
from app.core.logger import logger
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
# Constants
|
| 15 |
+
ENDPOINT = "https://grok.com/rest/media/post/create"
|
| 16 |
+
TIMEOUT = 30
|
| 17 |
+
BROWSER = "chrome133a"
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
class PostCreateManager:
    """Creates a Grok media "post" (session) ahead of video generation."""

    @staticmethod
    async def create(file_id: str, file_uri: str, auth_token: str, statsig_id: str = "") -> Optional[Dict[str, Any]]:
        """Create a session record for an uploaded image.

        Retries mirror GrokClient._request: an outer loop for statuses in
        grok_config.retry_status_codes (default 401/429) and an inner loop
        for 403s when the proxy pool is enabled.

        Args:
            file_id: ID of the uploaded file.
            file_uri: URI of the uploaded file (relative to assets.grok.com).
            auth_token: Cookie-style auth token.
            statsig_id: Optional statsig ID forwarded into the headers.

        Returns:
            Dict with post_id, file_id, file_uri, success=True and the raw
            response under "data".

        Raises:
            GrokApiException: on invalid parameters, missing token, HTTP
                failure, or any unexpected error (wrapped as CREATE_ERROR).
        """
        # Parameter validation
        if not file_id or not file_uri:
            raise GrokApiException("Missing file ID or URI", "INVALID_PARAMS")
        if not auth_token:
            raise GrokApiException("Missing auth token", "NO_AUTH_TOKEN")

        try:
            # Request body: point the post at the already-uploaded asset
            data = {
                "media_url": f"https://assets.grok.com/{file_uri}",
                "media_type": "MEDIA_POST_TYPE_IMAGE"
            }

            # Cookie carries the token, plus cf_clearance when configured
            cf = setting.grok_config.get("cf_clearance", "")
            headers = {
                **get_dynamic_headers("/rest/media/post/create", statsig_id=statsig_id),
                "Cookie": f"{auth_token};{cf}" if cf else auth_token
            }

            # Outer retry: configurable status codes (401/429 by default)
            retry_codes = setting.grok_config.get("retry_status_codes", [401, 429])
            MAX_OUTER_RETRY = 3

            for outer_retry in range(MAX_OUTER_RETRY + 1):  # initial attempt + MAX_OUTER_RETRY retries
                # Inner retry: 403 responses handled via the proxy pool
                max_403_retries = 5
                retry_403_count = 0

                while retry_403_count <= max_403_retries:
                    # Imported lazily — presumably to avoid a circular import at module load
                    from app.core.proxy_pool import proxy_pool

                    # On a 403 retry with the pool enabled, force a fresh proxy
                    if retry_403_count > 0 and proxy_pool._enabled:
                        logger.info(f"[PostCreate] 403 retry {retry_403_count}/{max_403_retries}, refreshing proxy...")
                        proxy = await proxy_pool.force_refresh()
                    else:
                        proxy = await setting.get_proxy_async("service")

                    proxies = {"http": proxy, "https": proxy} if proxy else None

                    # One session per attempt; the context manager closes it
                    async with AsyncSession() as session:
                        response = await session.post(
                            ENDPOINT,
                            headers=headers,
                            json=data,
                            impersonate=BROWSER,
                            timeout=TIMEOUT,
                            proxies=proxies
                        )

                        # Inner 403 retry: only meaningful when the proxy pool is on
                        if response.status_code == 403 and proxy_pool._enabled:
                            retry_403_count += 1

                            if retry_403_count <= max_403_retries:
                                logger.warning(f"[PostCreate] 403 error, retrying ({retry_403_count}/{max_403_retries})...")
                                await asyncio.sleep(0.5)
                                continue

                            # All inner retries failed: fall through to generic handling
                            logger.error(f"[PostCreate] 403 error, retried {retry_403_count-1} times, giving up")

                        # Retryable status codes -> escalate to the outer loop
                        if response.status_code in retry_codes:
                            if outer_retry < MAX_OUTER_RETRY:
                                delay = (outer_retry + 1) * 0.1  # Progressive delay: 0.1s, 0.2s, 0.3s
                                logger.warning(f"[PostCreate] {response.status_code} error, outer retry ({outer_retry+1}/{MAX_OUTER_RETRY}), waiting {delay}s...")
                                await asyncio.sleep(delay)
                                break  # Exit inner loop for outer retry
                            else:
                                logger.error(f"[PostCreate] {response.status_code} error, retried {outer_retry} times, giving up")
                                raise GrokApiException(f"Create failed: {response.status_code} error", "CREATE_ERROR")

                        if response.status_code == 200:
                            result = response.json()
                            post_id = result.get("post", {}).get("id", "")

                            if outer_retry > 0 or retry_403_count > 0:
                                logger.info("[PostCreate] Retry succeeded")

                            logger.debug(f"[PostCreate] Success, session ID: {post_id}")
                            return {
                                "post_id": post_id,
                                "file_id": file_id,
                                "file_uri": file_uri,
                                "success": True,
                                "data": result
                            }

                        # Any other status is terminal for this call
                        try:
                            error = response.json()
                            msg = f"Status: {response.status_code}, details: {error}"
                        except:  # NOTE(review): bare except — consider narrowing to Exception
                            msg = f"Status: {response.status_code}, details: {response.text[:200]}"

                        logger.error(f"[PostCreate] Failed: {msg}")
                        raise GrokApiException(f"Create failed: {msg}", "CREATE_ERROR")

        except GrokApiException:
            raise
        except Exception as e:
            # Wrap anything unexpected (network, JSON, etc.) uniformly
            logger.error(f"[PostCreate] Error: {e}")
            raise GrokApiException(f"Create error: {e}", "CREATE_ERROR") from e
app/services/grok/processer.py
ADDED
|
@@ -0,0 +1,430 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Grok API response processor - streaming and non-streaming responses"""
|
| 2 |
+
|
| 3 |
+
import orjson
|
| 4 |
+
import uuid
|
| 5 |
+
import time
|
| 6 |
+
import asyncio
|
| 7 |
+
from typing import AsyncGenerator, Tuple, Any
|
| 8 |
+
|
| 9 |
+
from app.core.config import setting
|
| 10 |
+
from app.core.exception import GrokApiException
|
| 11 |
+
from app.core.logger import logger
|
| 12 |
+
from app.models.openai_schema import (
|
| 13 |
+
OpenAIChatCompletionResponse,
|
| 14 |
+
OpenAIChatCompletionChoice,
|
| 15 |
+
OpenAIChatCompletionMessage,
|
| 16 |
+
OpenAIChatCompletionChunkResponse,
|
| 17 |
+
OpenAIChatCompletionChunkChoice,
|
| 18 |
+
OpenAIChatCompletionChunkMessage
|
| 19 |
+
)
|
| 20 |
+
from app.services.grok.cache import image_cache_service, video_cache_service
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
class StreamTimeoutManager:
    """Tracks first-chunk, inter-chunk and total timeouts for a streaming response.

    Uses time.monotonic() instead of asyncio.get_event_loop().time():
    get_event_loop() is deprecated without a running loop (Python 3.10+)
    and both are monotonic clocks, so behavior is unchanged while the
    class no longer depends on event-loop state.
    """

    def __init__(self, chunk_timeout: int = 120, first_timeout: int = 30, total_timeout: int = 600):
        # chunk_timeout: max seconds allowed between consecutive chunks
        # first_timeout: max seconds to wait for the first chunk
        # total_timeout: overall cap for the whole stream (<= 0 disables it)
        self.chunk_timeout = chunk_timeout
        self.first_timeout = first_timeout
        self.total_timeout = total_timeout
        self.start_time = time.monotonic()
        self.last_chunk_time = self.start_time
        self.first_received = False

    def check_timeout(self) -> Tuple[bool, str]:
        """Return (timed_out, reason); reason is empty when no limit is hit."""
        now = time.monotonic()

        # No data at all yet: only the first-response limit applies
        if not self.first_received and now - self.start_time > self.first_timeout:
            return True, f"First response timeout ({self.first_timeout}s)"

        if self.total_timeout > 0 and now - self.start_time > self.total_timeout:
            return True, f"Total timeout ({self.total_timeout}s)"

        # Between chunks: the inter-chunk limit applies
        if self.first_received and now - self.last_chunk_time > self.chunk_timeout:
            return True, f"Chunk timeout ({self.chunk_timeout}s)"

        return False, ""

    def mark_received(self):
        """Record that a chunk arrived (resets the inter-chunk timer)."""
        self.last_chunk_time = time.monotonic()
        self.first_received = True

    def duration(self) -> float:
        """Seconds elapsed since the manager was created."""
        return time.monotonic() - self.start_time
class GrokResponseProcessor:
|
| 60 |
+
"""Grok response processor"""
|
| 61 |
+
|
| 62 |
+
@staticmethod
async def process_normal(response, auth_token: str, model: str = None) -> OpenAIChatCompletionResponse:
    """Consume a non-streaming Grok response and build one completion.

    Iterates the line-delimited JSON stream until a final payload appears:
    either a finished video (streamingVideoGenerationResponse with a
    videoUrl) or a modelResponse. The HTTP response is closed on every
    exit path; the caller's session is closed by the caller.

    Args:
        response: curl_cffi streaming response (line-delimited JSON).
        auth_token: token forwarded to media helpers for asset access.
        model: fallback model name for video completions.

    Raises:
        GrokApiException: API/model-reported errors, JSON parse failures,
            or an empty stream (NO_RESPONSE).
    """
    # Tracks whether we already closed the response on a success path,
    # so the finally block doesn't close it twice.
    response_closed = False
    try:
        async for chunk in response.aiter_lines():
            if not chunk:
                continue

            data = orjson.loads(chunk)

            # Top-level API error terminates processing immediately
            if error := data.get("error"):
                raise GrokApiException(
                    f"API error: {error.get('message', 'Unknown error')}",
                    "API_ERROR",
                    {"code": error.get("code")}
                )

            grok_resp = data.get("result", {}).get("response", {})

            # Finished video: wrap the URL and return right away
            if video_resp := grok_resp.get("streamingVideoGenerationResponse"):
                if video_url := video_resp.get("videoUrl"):
                    content = await GrokResponseProcessor._build_video_content(video_url, auth_token)
                    result = GrokResponseProcessor._build_response(content, model or "grok-imagine-0.9")
                    response_closed = True
                    response.close()
                    return result

            # Intermediate chunks have no modelResponse; keep reading
            model_response = grok_resp.get("modelResponse")
            if not model_response:
                continue

            if error_msg := model_response.get("error"):
                raise GrokApiException(f"Model error: {error_msg}", "MODEL_ERROR")

            # Final text answer (plus any generated images appended below)
            content = model_response.get("message", "")
            model_name = model_response.get("model")

            if images := model_response.get("generatedImageUrls"):
                content = await GrokResponseProcessor._append_images(content, images, auth_token)

            result = GrokResponseProcessor._build_response(content, model_name)
            response_closed = True
            response.close()
            return result

        # Stream ended without ever producing a final payload
        raise GrokApiException("No response data", "NO_RESPONSE")

    except orjson.JSONDecodeError as e:
        logger.error(f"[Processor] JSON parse failed: {e}")
        raise GrokApiException(f"JSON parse failed: {e}", "JSON_ERROR") from e
    except Exception as e:
        logger.error(f"[Processor] Processing error: {type(e).__name__}: {e}")
        raise GrokApiException(f"Response processing error: {e}", "PROCESS_ERROR") from e
    finally:
        # Ensure the HTTP response is released even on error paths
        if not response_closed and hasattr(response, 'close'):
            try:
                response.close()
            except Exception as e:
                logger.warning(f"[Processor] Failed to close response: {e}")
@staticmethod
|
| 129 |
+
async def process_stream(response, auth_token: str, session: Any = None) -> AsyncGenerator[str, None]:
|
| 130 |
+
"""Process streaming response"""
|
| 131 |
+
# State variables
|
| 132 |
+
is_image = False
|
| 133 |
+
is_thinking = False
|
| 134 |
+
thinking_finished = False
|
| 135 |
+
model = None
|
| 136 |
+
filtered_tags = setting.grok_config.get("filtered_tags", "").split(",")
|
| 137 |
+
video_progress_started = False
|
| 138 |
+
last_video_progress = -1
|
| 139 |
+
response_closed = False
|
| 140 |
+
show_thinking = setting.grok_config.get("show_thinking", True)
|
| 141 |
+
|
| 142 |
+
# Timeout management
|
| 143 |
+
timeout_mgr = StreamTimeoutManager(
|
| 144 |
+
chunk_timeout=setting.grok_config.get("stream_chunk_timeout", 120),
|
| 145 |
+
first_timeout=setting.grok_config.get("stream_first_response_timeout", 30),
|
| 146 |
+
total_timeout=setting.grok_config.get("stream_total_timeout", 600)
|
| 147 |
+
)
|
| 148 |
+
|
| 149 |
+
def make_chunk(content: str, finish: str = None):
|
| 150 |
+
"""Build response chunk"""
|
| 151 |
+
chunk_data = OpenAIChatCompletionChunkResponse(
|
| 152 |
+
id=f"chatcmpl-{uuid.uuid4()}",
|
| 153 |
+
created=int(time.time()),
|
| 154 |
+
model=model or "grok-4-mini-thinking-tahoe",
|
| 155 |
+
choices=[OpenAIChatCompletionChunkChoice(
|
| 156 |
+
index=0,
|
| 157 |
+
delta=OpenAIChatCompletionChunkMessage(
|
| 158 |
+
role="assistant",
|
| 159 |
+
content=content
|
| 160 |
+
) if content else {},
|
| 161 |
+
finish_reason=finish
|
| 162 |
+
)]
|
| 163 |
+
)
|
| 164 |
+
return f"data: {chunk_data.model_dump_json()}\n\n"
|
| 165 |
+
|
| 166 |
+
try:
|
| 167 |
+
async for chunk in response.aiter_lines():
|
| 168 |
+
# Timeout check
|
| 169 |
+
is_timeout, timeout_msg = timeout_mgr.check_timeout()
|
| 170 |
+
if is_timeout:
|
| 171 |
+
logger.warning(f"[Processor] {timeout_msg}")
|
| 172 |
+
yield make_chunk("", "stop")
|
| 173 |
+
yield "data: [DONE]\n\n"
|
| 174 |
+
return
|
| 175 |
+
|
| 176 |
+
logger.debug(f"[Processor] Received chunk: {len(chunk)} bytes")
|
| 177 |
+
if not chunk:
|
| 178 |
+
continue
|
| 179 |
+
|
| 180 |
+
try:
|
| 181 |
+
data = orjson.loads(chunk)
|
| 182 |
+
|
| 183 |
+
# Error check
|
| 184 |
+
if error := data.get("error"):
|
| 185 |
+
error_msg = error.get('message', 'Unknown error')
|
| 186 |
+
logger.error(f"[Processor] API error: {error_msg}")
|
| 187 |
+
yield make_chunk(f"Error: {error_msg}", "stop")
|
| 188 |
+
yield "data: [DONE]\n\n"
|
| 189 |
+
return
|
| 190 |
+
|
| 191 |
+
grok_resp = data.get("result", {}).get("response", {})
|
| 192 |
+
logger.debug(f"[Processor] Parsed response: {len(grok_resp)} bytes")
|
| 193 |
+
if not grok_resp:
|
| 194 |
+
continue
|
| 195 |
+
|
| 196 |
+
timeout_mgr.mark_received()
|
| 197 |
+
|
| 198 |
+
# Update model
|
| 199 |
+
if user_resp := grok_resp.get("userResponse"):
|
| 200 |
+
if m := user_resp.get("model"):
|
| 201 |
+
model = m
|
| 202 |
+
|
| 203 |
+
# Video handling
|
| 204 |
+
if video_resp := grok_resp.get("streamingVideoGenerationResponse"):
|
| 205 |
+
progress = video_resp.get("progress", 0)
|
| 206 |
+
v_url = video_resp.get("videoUrl")
|
| 207 |
+
|
| 208 |
+
# Progress updates
|
| 209 |
+
if progress > last_video_progress:
|
| 210 |
+
last_video_progress = progress
|
| 211 |
+
if show_thinking:
|
| 212 |
+
if not video_progress_started:
|
| 213 |
+
content = f"<think>Video generated {progress}%\n"
|
| 214 |
+
video_progress_started = True
|
| 215 |
+
elif progress < 100:
|
| 216 |
+
content = f"Video generated {progress}%\n"
|
| 217 |
+
else:
|
| 218 |
+
content = f"Video generated {progress}%</think>\n"
|
| 219 |
+
yield make_chunk(content)
|
| 220 |
+
|
| 221 |
+
# Video URL
|
| 222 |
+
if v_url:
|
| 223 |
+
logger.debug("[Processor] Video generation complete")
|
| 224 |
+
video_content = await GrokResponseProcessor._build_video_content(v_url, auth_token)
|
| 225 |
+
yield make_chunk(video_content)
|
| 226 |
+
|
| 227 |
+
continue
|
| 228 |
+
|
| 229 |
+
# Image mode
|
| 230 |
+
if grok_resp.get("imageAttachmentInfo"):
|
| 231 |
+
is_image = True
|
| 232 |
+
|
| 233 |
+
token = grok_resp.get("token", "")
|
| 234 |
+
|
| 235 |
+
# Image handling
|
| 236 |
+
if is_image:
|
| 237 |
+
if model_resp := grok_resp.get("modelResponse"):
|
| 238 |
+
image_mode = setting.global_config.get("image_mode", "url")
|
| 239 |
+
content = ""
|
| 240 |
+
|
| 241 |
+
for img in model_resp.get("generatedImageUrls", []):
|
| 242 |
+
try:
|
| 243 |
+
if image_mode == "base64":
|
| 244 |
+
# Base64 mode - send in chunks
|
| 245 |
+
base64_str = await image_cache_service.download_base64(f"/{img}", auth_token)
|
| 246 |
+
if base64_str:
|
| 247 |
+
# Chunk large data
|
| 248 |
+
if not base64_str.startswith("data:"):
|
| 249 |
+
parts = base64_str.split(",", 1)
|
| 250 |
+
if len(parts) == 2:
|
| 251 |
+
yield make_chunk(f"
|
| 252 |
+
# 8KB chunks
|
| 253 |
+
for i in range(0, len(parts[1]), 8192):
|
| 254 |
+
yield make_chunk(parts[1][i:i+8192])
|
| 255 |
+
yield make_chunk(")\n")
|
| 256 |
+
else:
|
| 257 |
+
yield make_chunk(f"\n")
|
| 258 |
+
else:
|
| 259 |
+
yield make_chunk(f"\n")
|
| 260 |
+
else:
|
| 261 |
+
yield make_chunk(f"\n")
|
| 262 |
+
else:
|
| 263 |
+
# URL mode
|
| 264 |
+
await image_cache_service.download_image(f"/{img}", auth_token)
|
| 265 |
+
img_path = img.replace('/', '-')
|
| 266 |
+
base_url = setting.global_config.get("base_url", "")
|
| 267 |
+
img_url = f"{base_url}/images/{img_path}" if base_url else f"/images/{img_path}"
|
| 268 |
+
content += f"\n"
|
| 269 |
+
except Exception as e:
|
| 270 |
+
logger.warning(f"[Processor] Failed to process image: {e}")
|
| 271 |
+
content += f"\n"
|
| 272 |
+
|
| 273 |
+
yield make_chunk(content.strip(), "stop")
|
| 274 |
+
return
|
| 275 |
+
elif token:
|
| 276 |
+
yield make_chunk(token)
|
| 277 |
+
|
| 278 |
+
# Chat handling
|
| 279 |
+
else:
|
| 280 |
+
if isinstance(token, list):
|
| 281 |
+
continue
|
| 282 |
+
|
| 283 |
+
if any(tag in token for tag in filtered_tags if token):
|
| 284 |
+
continue
|
| 285 |
+
|
| 286 |
+
current_is_thinking = grok_resp.get("isThinking", False)
|
| 287 |
+
message_tag = grok_resp.get("messageTag")
|
| 288 |
+
|
| 289 |
+
if thinking_finished and current_is_thinking:
|
| 290 |
+
continue
|
| 291 |
+
|
| 292 |
+
# Search results handling
|
| 293 |
+
if grok_resp.get("toolUsageCardId"):
|
| 294 |
+
if web_search := grok_resp.get("webSearchResults"):
|
| 295 |
+
if current_is_thinking:
|
| 296 |
+
if show_thinking:
|
| 297 |
+
for result in web_search.get("results", []):
|
| 298 |
+
title = result.get("title", "")
|
| 299 |
+
url = result.get("url", "")
|
| 300 |
+
preview = result.get("preview", "")
|
| 301 |
+
preview_clean = preview.replace("\n", "") if isinstance(preview, str) else ""
|
| 302 |
+
token += f'\n- [{title}]({url} "{preview_clean}")'
|
| 303 |
+
token += "\n"
|
| 304 |
+
else:
|
| 305 |
+
continue
|
| 306 |
+
else:
|
| 307 |
+
continue
|
| 308 |
+
else:
|
| 309 |
+
continue
|
| 310 |
+
|
| 311 |
+
if token:
|
| 312 |
+
content = token
|
| 313 |
+
|
| 314 |
+
if message_tag == "header":
|
| 315 |
+
content = f"\n\n{token}\n\n"
|
| 316 |
+
|
| 317 |
+
# Thinking state transitions
|
| 318 |
+
should_skip = False
|
| 319 |
+
if not is_thinking and current_is_thinking:
|
| 320 |
+
if show_thinking:
|
| 321 |
+
content = f"<think>\n{content}"
|
| 322 |
+
else:
|
| 323 |
+
should_skip = True
|
| 324 |
+
elif is_thinking and not current_is_thinking:
|
| 325 |
+
if show_thinking:
|
| 326 |
+
content = f"\n</think>\n{content}"
|
| 327 |
+
thinking_finished = True
|
| 328 |
+
elif current_is_thinking:
|
| 329 |
+
if not show_thinking:
|
| 330 |
+
should_skip = True
|
| 331 |
+
|
| 332 |
+
if not should_skip:
|
| 333 |
+
yield make_chunk(content)
|
| 334 |
+
|
| 335 |
+
is_thinking = current_is_thinking
|
| 336 |
+
|
| 337 |
+
except (orjson.JSONDecodeError, UnicodeDecodeError) as e:
|
| 338 |
+
logger.warning(f"[Processor] Parse failed: {e}")
|
| 339 |
+
continue
|
| 340 |
+
except Exception as e:
|
| 341 |
+
logger.warning(f"[Processor] Processing error: {e}")
|
| 342 |
+
continue
|
| 343 |
+
|
| 344 |
+
yield make_chunk("", "stop")
|
| 345 |
+
yield "data: [DONE]\n\n"
|
| 346 |
+
logger.info(f"[Processor] Streaming complete, duration: {timeout_mgr.duration():.2f}s")
|
| 347 |
+
|
| 348 |
+
except Exception as e:
|
| 349 |
+
logger.error(f"[Processor] Fatal error: {e}")
|
| 350 |
+
yield make_chunk(f"Processing error: {e}", "error")
|
| 351 |
+
yield "data: [DONE]\n\n"
|
| 352 |
+
finally:
|
| 353 |
+
if not response_closed and hasattr(response, 'close'):
|
| 354 |
+
try:
|
| 355 |
+
response.close()
|
| 356 |
+
logger.debug("[Processor] Response closed")
|
| 357 |
+
except Exception as e:
|
| 358 |
+
logger.warning(f"[Processor] Close failed: {e}")
|
| 359 |
+
|
| 360 |
+
if session:
|
| 361 |
+
try:
|
| 362 |
+
await session.close()
|
| 363 |
+
logger.debug("[Processor] Session closed")
|
| 364 |
+
except Exception as e:
|
| 365 |
+
logger.warning(f"[Processor] Failed to close session: {e}")
|
| 366 |
+
|
| 367 |
+
@staticmethod
|
| 368 |
+
async def _build_video_content(video_url: str, auth_token: str) -> str:
|
| 369 |
+
"""Build video content"""
|
| 370 |
+
logger.debug(f"[Processor] Video detected: {video_url}")
|
| 371 |
+
full_url = f"https://assets.grok.com/{video_url}"
|
| 372 |
+
|
| 373 |
+
try:
|
| 374 |
+
cache_path = await video_cache_service.download_video(f"/{video_url}", auth_token)
|
| 375 |
+
if cache_path:
|
| 376 |
+
video_path = video_url.replace('/', '-')
|
| 377 |
+
base_url = setting.global_config.get("base_url", "")
|
| 378 |
+
local_url = f"{base_url}/images/{video_path}" if base_url else f"/images/{video_path}"
|
| 379 |
+
return f'<video src="{local_url}" controls="controls" width="500" height="300"></video>\n'
|
| 380 |
+
except Exception as e:
|
| 381 |
+
logger.warning(f"[Processor] Failed to cache video: {e}")
|
| 382 |
+
|
| 383 |
+
return f'<video src="{full_url}" controls="controls" width="500" height="300"></video>\n'
|
| 384 |
+
|
| 385 |
+
@staticmethod
|
| 386 |
+
async def _append_images(content: str, images: list, auth_token: str) -> str:
|
| 387 |
+
"""Append images to content"""
|
| 388 |
+
image_mode = setting.global_config.get("image_mode", "url")
|
| 389 |
+
|
| 390 |
+
for img in images:
|
| 391 |
+
try:
|
| 392 |
+
if image_mode == "base64":
|
| 393 |
+
base64_str = await image_cache_service.download_base64(f"/{img}", auth_token)
|
| 394 |
+
if base64_str:
|
| 395 |
+
content += f"\n"
|
| 396 |
+
else:
|
| 397 |
+
content += f"\n"
|
| 398 |
+
else:
|
| 399 |
+
cache_path = await image_cache_service.download_image(f"/{img}", auth_token)
|
| 400 |
+
if cache_path:
|
| 401 |
+
img_path = img.replace('/', '-')
|
| 402 |
+
base_url = setting.global_config.get("base_url", "")
|
| 403 |
+
img_url = f"{base_url}/images/{img_path}" if base_url else f"/images/{img_path}"
|
| 404 |
+
content += f"\n"
|
| 405 |
+
else:
|
| 406 |
+
content += f"\n"
|
| 407 |
+
except Exception as e:
|
| 408 |
+
logger.warning(f"[Processor] Failed to process image: {e}")
|
| 409 |
+
content += f"\n"
|
| 410 |
+
|
| 411 |
+
return content
|
| 412 |
+
|
| 413 |
+
@staticmethod
|
| 414 |
+
def _build_response(content: str, model: str) -> OpenAIChatCompletionResponse:
|
| 415 |
+
"""Build response object"""
|
| 416 |
+
return OpenAIChatCompletionResponse(
|
| 417 |
+
id=f"chatcmpl-{uuid.uuid4()}",
|
| 418 |
+
object="chat.completion",
|
| 419 |
+
created=int(time.time()),
|
| 420 |
+
model=model,
|
| 421 |
+
choices=[OpenAIChatCompletionChoice(
|
| 422 |
+
index=0,
|
| 423 |
+
message=OpenAIChatCompletionMessage(
|
| 424 |
+
role="assistant",
|
| 425 |
+
content=content
|
| 426 |
+
),
|
| 427 |
+
finish_reason="stop"
|
| 428 |
+
)],
|
| 429 |
+
usage=None
|
| 430 |
+
)
|
app/services/grok/statsig.py
ADDED
|
@@ -0,0 +1,82 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Grok header manager - generate dynamic headers and Statsig ID"""
|
| 2 |
+
|
| 3 |
+
import base64
|
| 4 |
+
import random
|
| 5 |
+
import string
|
| 6 |
+
import uuid
|
| 7 |
+
from typing import Dict
|
| 8 |
+
|
| 9 |
+
from app.core.logger import logger
|
| 10 |
+
from app.core.config import setting
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
# Base headers
|
| 14 |
+
BASE_HEADERS = {
|
| 15 |
+
"Accept": "*/*",
|
| 16 |
+
"Accept-Language": "zh-CN,zh;q=0.9",
|
| 17 |
+
"Accept-Encoding": "gzip, deflate, br, zstd",
|
| 18 |
+
"Connection": "keep-alive",
|
| 19 |
+
"Origin": "https://grok.com",
|
| 20 |
+
"Priority": "u=1, i",
|
| 21 |
+
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/133.0.0.0 Safari/537.36",
|
| 22 |
+
"Sec-Ch-Ua": '"Not(A:Brand";v="99", "Google Chrome";v="133", "Chromium";v="133"',
|
| 23 |
+
"Sec-Ch-Ua-Mobile": "?0",
|
| 24 |
+
"Sec-Ch-Ua-Platform": '"macOS"',
|
| 25 |
+
"Sec-Fetch-Dest": "empty",
|
| 26 |
+
"Sec-Fetch-Mode": "cors",
|
| 27 |
+
"Sec-Fetch-Site": "same-origin",
|
| 28 |
+
"Baggage": "sentry-environment=production,sentry-public_key=b311e0f2690c81f25e2c4cf6d4f7ce1c",
|
| 29 |
+
}
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
def _random_string(length: int, letters_only: bool = True) -> str:
|
| 33 |
+
"""Generate random string"""
|
| 34 |
+
chars = string.ascii_lowercase if letters_only else string.ascii_lowercase + string.digits
|
| 35 |
+
return ''.join(random.choices(chars, k=length))
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
def _generate_statsig_id() -> str:
    """Generate a plausible base64-encoded x-statsig-id.

    Randomly chooses one of two JS-error-shaped payloads:
    1. e:TypeError: Cannot read properties of null (reading 'children['xxxxx']')
    2. e:TypeError: Cannot read properties of undefined (reading 'xxxxxxxxxx')
    """
    pick_children_variant = random.choice([True, False])
    if pick_children_variant:
        suffix = _random_string(5, letters_only=False)
        payload = f"e:TypeError: Cannot read properties of null (reading 'children['{suffix}']')"
    else:
        suffix = _random_string(10)
        payload = f"e:TypeError: Cannot read properties of undefined (reading '{suffix}')"

    return base64.b64encode(payload.encode()).decode()
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
def get_dynamic_headers(pathname: str = "/rest/app-chat/conversations/new", statsig_id: str = "") -> Dict[str, str]:
    """Build the full header set for a grok.com request.

    Args:
        pathname: Request path; upload paths get a text/plain Content-Type.
        statsig_id: Optional per-request override for x-statsig-id.

    Returns:
        Full request header dict.

    Raises:
        ValueError: when no override is given, dynamic generation is
            disabled, and no fixed ``x_statsig_id`` is configured.
    """
    # Resolve the statsig id: override > dynamic generation > fixed config
    if statsig_id:
        logger.debug(f"[Statsig] Using request override: {statsig_id}")
    elif setting.grok_config.get("dynamic_statsig", False):
        statsig_id = _generate_statsig_id()
        logger.debug(f"[Statsig] Generated dynamically: {statsig_id}")
    else:
        statsig_id = setting.grok_config.get("x_statsig_id")
        if not statsig_id:
            raise ValueError("x_statsig_id is not set in the config file")
        logger.debug(f"[Statsig] Using fixed value: {statsig_id}")

    # Combine the static browser headers with the per-request fields
    headers = dict(BASE_HEADERS)
    headers.update({
        "x-statsig-id": statsig_id,
        "x-xai-request-id": str(uuid.uuid4()),
        "Content-Type": "text/plain;charset=UTF-8" if "upload-file" in pathname else "application/json",
    })
    return headers
|
app/services/grok/token.py
ADDED
|
@@ -0,0 +1,649 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Grok token manager - singleton token load balancing and status management"""
|
| 2 |
+
|
| 3 |
+
import os
|
| 4 |
+
import orjson
|
| 5 |
+
import time
|
| 6 |
+
import asyncio
|
| 7 |
+
import aiofiles
|
| 8 |
+
import portalocker
|
| 9 |
+
from pathlib import Path
|
| 10 |
+
from curl_cffi.requests import AsyncSession
|
| 11 |
+
from typing import Dict, Any, Optional, Tuple
|
| 12 |
+
|
| 13 |
+
from app.models.grok_models import TokenType, Models
|
| 14 |
+
from app.core.exception import GrokApiException
|
| 15 |
+
from app.core.logger import logger
|
| 16 |
+
from app.core.config import setting
|
| 17 |
+
from app.services.grok.statsig import get_dynamic_headers
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
# Constants
RATE_LIMIT_API = "https://grok.com/rest/rate-limits"  # endpoint polled by check_limits
TIMEOUT = 30            # HTTP timeout in seconds
BROWSER = "chrome133a"  # curl_cffi impersonation profile
MAX_FAILURES = 3        # failedCount at/above this excludes a token from selection
TOKEN_INVALID = 401     # HTTP status treated as an invalid token
STATSIG_INVALID = 403   # HTTP status treated as an invalid statsig id

# Cooldown constants
COOLDOWN_REQUESTS = 5  # Cooldown requests for normal failures
COOLDOWN_429_WITH_QUOTA = 3600  # 429 with quota: 1 hour cooldown (seconds)
COOLDOWN_429_NO_QUOTA = 36000  # 429 no quota: 10 hour cooldown (seconds)
|
| 33 |
+
|
| 34 |
+
class GrokTokenManager:
|
| 35 |
+
"""Token manager (singleton)"""
|
| 36 |
+
|
| 37 |
+
_instance: Optional['GrokTokenManager'] = None
|
| 38 |
+
_lock = asyncio.Lock()
|
| 39 |
+
|
| 40 |
+
def __new__(cls) -> 'GrokTokenManager':
|
| 41 |
+
if cls._instance is None:
|
| 42 |
+
cls._instance = super().__new__(cls)
|
| 43 |
+
return cls._instance
|
| 44 |
+
|
| 45 |
+
    def __init__(self):
        """Initialize singleton state (idempotent; re-runs are no-ops)."""
        # __new__ returns the same object every time, so guard against
        # re-initialization wiping live state.
        if hasattr(self, '_initialized'):
            return

        # Persistent token store location (may be repointed by set_storage)
        self.token_file = self._resolve_data_dir() / "token.json"
        self._file_lock = asyncio.Lock()
        self.token_file.parent.mkdir(parents=True, exist_ok=True)
        self._storage = None
        self.token_data = None  # Lazy load

        # Batch save queue
        self._save_pending = False  # Pending save flag
        self._save_task = None  # Background save task
        self._shutdown = False  # Shutdown flag

        # Cooldown state
        self._cooldown_counts: Dict[str, int] = {}  # Token -> remaining cooldown requests
        self._request_counter = 0  # Global request counter

        # Refresh state
        self._refresh_lock = False  # Refresh lock
        self._refresh_progress: Dict[str, Any] = {"running": False, "current": 0, "total": 0, "success": 0, "failed": 0}

        self._initialized = True
        logger.debug(f"[Token] Initialized: {self.token_file}")
|
| 70 |
+
|
| 71 |
+
@staticmethod
|
| 72 |
+
def _resolve_data_dir() -> Path:
|
| 73 |
+
"""Resolve data directory for persistent storage."""
|
| 74 |
+
data_dir_env = os.getenv("DATA_DIR")
|
| 75 |
+
if data_dir_env:
|
| 76 |
+
return Path(data_dir_env)
|
| 77 |
+
if Path("/data").exists():
|
| 78 |
+
return Path("/data")
|
| 79 |
+
return Path(__file__).parents[3] / "data"
|
| 80 |
+
|
| 81 |
+
def _using_file_storage(self) -> bool:
|
| 82 |
+
"""Check if storage is file-based or unset."""
|
| 83 |
+
if self._storage is None:
|
| 84 |
+
return True
|
| 85 |
+
try:
|
| 86 |
+
from app.core.storage import FileStorage
|
| 87 |
+
return isinstance(self._storage, FileStorage)
|
| 88 |
+
except Exception:
|
| 89 |
+
return False
|
| 90 |
+
|
| 91 |
+
def set_storage(self, storage) -> None:
|
| 92 |
+
"""Set storage instance"""
|
| 93 |
+
self._storage = storage
|
| 94 |
+
data_dir = getattr(storage, "data_dir", None)
|
| 95 |
+
if data_dir:
|
| 96 |
+
self.token_file = Path(data_dir) / "token.json"
|
| 97 |
+
self.token_file.parent.mkdir(parents=True, exist_ok=True)
|
| 98 |
+
|
| 99 |
+
    async def _load_data(self) -> None:
        """Load token data asynchronously (multi-process safe).

        Uses the storage backend when one is configured and it is not
        file-based; otherwise reads token.json under a shared file lock.
        Any failure falls back to an empty structure rather than raising.
        """
        default = {TokenType.NORMAL.value: {}, TokenType.SUPER.value: {}}

        def load_sync():
            # Shared (read) lock so concurrent processes can read together
            # but not while a writer holds the exclusive lock.
            with open(self.token_file, "r", encoding="utf-8") as f:
                portalocker.lock(f, portalocker.LOCK_SH)
                try:
                    return orjson.loads(f.read())
                finally:
                    portalocker.unlock(f)

        try:
            if self._storage and not self._using_file_storage():
                data = await self._storage.load_tokens()
                self.token_data = data or default
                return

            if self.token_file.exists():
                # Read file with process lock; asyncio lock serializes
                # in-process access, to_thread keeps the loop unblocked.
                async with self._file_lock:
                    self.token_data = await asyncio.to_thread(load_sync)
            else:
                self.token_data = default
                logger.debug("[Token] Created new data file")
        except Exception as e:
            logger.error(f"[Token] Load failed: {e}")
            self.token_data = default
|
| 127 |
+
|
| 128 |
+
    async def _save_data(self) -> None:
        """Save token data (multi-process safe).

        Raises:
            GrokApiException: with code TOKEN_SAVE_ERROR on any failure.

        NOTE(review): branch asymmetry vs _load_data — load goes to the
        file path whenever storage is file-based, but save uses the
        storage backend whenever one is set at all; confirm a FileStorage
        backend's save_tokens writes the same token.json.
        """
        def save_sync(data):
            # Exclusive (write) lock: blocks both readers and other writers
            # across processes while the file is rewritten.
            with open(self.token_file, "w", encoding="utf-8") as f:
                portalocker.lock(f, portalocker.LOCK_EX)
                try:
                    content = orjson.dumps(data, option=orjson.OPT_INDENT_2).decode()
                    f.write(content)
                    f.flush()
                finally:
                    portalocker.unlock(f)

        try:
            if not self._storage:
                async with self._file_lock:
                    await asyncio.to_thread(save_sync, self.token_data)
            else:
                await self._storage.save_tokens(self.token_data)
        except Exception as e:
            logger.error(f"[Token] Save failed: {e}")
            raise GrokApiException(f"Save failed: {e}", "TOKEN_SAVE_ERROR")
|
| 149 |
+
|
| 150 |
+
    def _mark_dirty(self) -> None:
        """Mark pending save; the batch worker flushes it on its next tick."""
        self._save_pending = True
|
| 153 |
+
|
| 154 |
+
async def _batch_save_worker(self) -> None:
|
| 155 |
+
"""Batch save background task"""
|
| 156 |
+
from app.core.config import setting
|
| 157 |
+
|
| 158 |
+
interval = setting.global_config.get("batch_save_interval", 1.0)
|
| 159 |
+
logger.info(f"[Token] Save task started, interval: {interval}s")
|
| 160 |
+
|
| 161 |
+
while not self._shutdown:
|
| 162 |
+
await asyncio.sleep(interval)
|
| 163 |
+
|
| 164 |
+
if self._save_pending and not self._shutdown:
|
| 165 |
+
try:
|
| 166 |
+
await self._save_data()
|
| 167 |
+
self._save_pending = False
|
| 168 |
+
logger.debug("[Token] Save completed")
|
| 169 |
+
except Exception as e:
|
| 170 |
+
logger.error(f"[Token] Save failed: {e}")
|
| 171 |
+
|
| 172 |
+
async def start_batch_save(self) -> None:
|
| 173 |
+
"""Start batch save task"""
|
| 174 |
+
if self._save_task is None:
|
| 175 |
+
self._save_task = asyncio.create_task(self._batch_save_worker())
|
| 176 |
+
logger.info("[Token] Save task created")
|
| 177 |
+
|
| 178 |
+
async def shutdown(self) -> None:
|
| 179 |
+
"""Shutdown and flush pending data"""
|
| 180 |
+
self._shutdown = True
|
| 181 |
+
|
| 182 |
+
if self._save_task:
|
| 183 |
+
self._save_task.cancel()
|
| 184 |
+
try:
|
| 185 |
+
await self._save_task
|
| 186 |
+
except asyncio.CancelledError:
|
| 187 |
+
pass
|
| 188 |
+
|
| 189 |
+
# Final flush
|
| 190 |
+
if self._save_pending:
|
| 191 |
+
await self._save_data()
|
| 192 |
+
logger.info("[Token] Flush completed on shutdown")
|
| 193 |
+
|
| 194 |
+
@staticmethod
|
| 195 |
+
def _extract_sso(auth_token: str) -> Optional[str]:
|
| 196 |
+
"""Extract SSO value"""
|
| 197 |
+
if "sso=" in auth_token:
|
| 198 |
+
return auth_token.split("sso=")[1].split(";")[0]
|
| 199 |
+
logger.warning("[Token] Unable to extract SSO value")
|
| 200 |
+
return None
|
| 201 |
+
|
| 202 |
+
def _find_token(self, sso: str) -> Tuple[Optional[str], Optional[Dict]]:
|
| 203 |
+
"""Find token"""
|
| 204 |
+
for token_type in [TokenType.NORMAL.value, TokenType.SUPER.value]:
|
| 205 |
+
if sso in self.token_data[token_type]:
|
| 206 |
+
return token_type, self.token_data[token_type][sso]
|
| 207 |
+
return None, None
|
| 208 |
+
|
| 209 |
+
async def add_token(self, tokens: list[str], token_type: TokenType) -> None:
|
| 210 |
+
"""Add tokens"""
|
| 211 |
+
if not tokens:
|
| 212 |
+
return
|
| 213 |
+
|
| 214 |
+
count = 0
|
| 215 |
+
for token in tokens:
|
| 216 |
+
if not token or not token.strip():
|
| 217 |
+
continue
|
| 218 |
+
|
| 219 |
+
self.token_data[token_type.value][token] = {
|
| 220 |
+
"createdTime": int(time.time() * 1000),
|
| 221 |
+
"remainingQueries": -1,
|
| 222 |
+
"heavyremainingQueries": -1,
|
| 223 |
+
"status": "active",
|
| 224 |
+
"failedCount": 0,
|
| 225 |
+
"lastFailureTime": None,
|
| 226 |
+
"lastFailureReason": None,
|
| 227 |
+
"tags": [],
|
| 228 |
+
"note": ""
|
| 229 |
+
}
|
| 230 |
+
count += 1
|
| 231 |
+
|
| 232 |
+
self._mark_dirty() # Batch save
|
| 233 |
+
logger.info(f"[Token] Added {count} {token_type.value} tokens")
|
| 234 |
+
|
| 235 |
+
async def delete_token(self, tokens: list[str], token_type: TokenType) -> None:
|
| 236 |
+
"""Delete tokens"""
|
| 237 |
+
if not tokens:
|
| 238 |
+
return
|
| 239 |
+
|
| 240 |
+
count = 0
|
| 241 |
+
for token in tokens:
|
| 242 |
+
if token in self.token_data[token_type.value]:
|
| 243 |
+
del self.token_data[token_type.value][token]
|
| 244 |
+
count += 1
|
| 245 |
+
|
| 246 |
+
self._mark_dirty() # Batch save
|
| 247 |
+
logger.info(f"[Token] Deleted {count} {token_type.value} tokens")
|
| 248 |
+
|
| 249 |
+
async def update_token_tags(self, token: str, token_type: TokenType, tags: list[str]) -> None:
|
| 250 |
+
"""Update token tags"""
|
| 251 |
+
if token not in self.token_data[token_type.value]:
|
| 252 |
+
raise GrokApiException("Token not found", "TOKEN_NOT_FOUND", {"token": token[:10]})
|
| 253 |
+
|
| 254 |
+
cleaned = [t.strip() for t in tags if t and t.strip()]
|
| 255 |
+
self.token_data[token_type.value][token]["tags"] = cleaned
|
| 256 |
+
self._mark_dirty() # Batch save
|
| 257 |
+
logger.info(f"[Token] Updated tags: {token[:10]}... -> {cleaned}")
|
| 258 |
+
|
| 259 |
+
async def update_token_note(self, token: str, token_type: TokenType, note: str) -> None:
|
| 260 |
+
"""Update token note"""
|
| 261 |
+
if token not in self.token_data[token_type.value]:
|
| 262 |
+
raise GrokApiException("Token not found", "TOKEN_NOT_FOUND", {"token": token[:10]})
|
| 263 |
+
|
| 264 |
+
self.token_data[token_type.value][token]["note"] = note.strip()
|
| 265 |
+
self._mark_dirty() # Batch save
|
| 266 |
+
logger.info(f"[Token] Updated note: {token[:10]}...")
|
| 267 |
+
|
| 268 |
+
def get_tokens(self) -> Dict[str, Any]:
|
| 269 |
+
"""Get all tokens"""
|
| 270 |
+
return self.token_data.copy()
|
| 271 |
+
|
| 272 |
+
    async def _reload_if_needed(self) -> None:
        """Reload data in multi-process mode.

        Only applies to file-based (or unset) storage: another process may
        have rewritten token.json, so re-read it under a shared lock before
        selecting a token. Non-file backends are assumed authoritative.
        A failed reload keeps the in-memory data and only logs a warning.
        """
        # Reload only for file storage in multi-process mode
        if not self._using_file_storage():
            return

        def reload_sync():
            # Shared lock: safe to read concurrently with other readers
            with open(self.token_file, "r", encoding="utf-8") as f:
                portalocker.lock(f, portalocker.LOCK_SH)
                try:
                    return orjson.loads(f.read())
                finally:
                    portalocker.unlock(f)

        try:
            if self.token_file.exists():
                self.token_data = await asyncio.to_thread(reload_sync)
        except Exception as e:
            logger.warning(f"[Token] Reload failed: {e}")
|
| 291 |
+
|
| 292 |
+
    async def get_token(self, model: str) -> str:
        """Select a token for *model* and format it as a Grok cookie string."""
        jwt = await self.select_token(model)
        # Grok expects the value under both sso-rw and sso cookie names
        return f"sso-rw={jwt};sso={jwt}"
|
| 296 |
+
|
| 297 |
+
    async def select_token(self, model: str) -> str:
        """Select best token (multi-process safe, with cooldown).

        Selection order: never-used tokens (remaining == -1) first, then the
        token with the most remaining quota. ``grok-4-heavy`` draws only from
        the super pool; other models try the normal pool first and fall back
        to super.

        Raises:
            GrokApiException: NO_AVAILABLE_TOKEN when every candidate is
                expired, failed out, cooling down, or out of quota.
        """
        # Reload latest data (multi-process mode)
        await self._reload_if_needed()

        # Decrement request-based cooldown counters; each call to this
        # method counts as one "request" toward ending a cooldown.
        self._request_counter += 1
        for token in list(self._cooldown_counts.keys()):
            self._cooldown_counts[token] -= 1
            if self._cooldown_counts[token] <= 0:
                del self._cooldown_counts[token]
                logger.debug(f"[Token] Cooldown ended: {token[:10]}...")

        current_time = time.time() * 1000  # milliseconds

        def select_best(tokens: Dict[str, Any], field: str) -> Tuple[Optional[str], Optional[int]]:
            """Pick (token, remaining) from one pool; (None, None) if empty."""
            unused, used = [], []

            for key, data in tokens.items():
                # Skip expired tokens
                if data.get("status") == "expired":
                    continue

                # Skip tokens with too many failures (any error status)
                if data.get("failedCount", 0) >= MAX_FAILURES:
                    continue

                # Skip request cooldown tokens
                if key in self._cooldown_counts:
                    continue

                # Skip time cooldown tokens (429)
                cooldown_until = data.get("cooldownUntil", 0)
                if cooldown_until and cooldown_until > current_time:
                    continue

                # remaining == -1 means "never queried" (quota unknown)
                remaining = int(data.get(field, -1))
                if remaining == 0:
                    continue

                if remaining == -1:
                    unused.append(key)
                elif remaining > 0:
                    used.append((key, remaining))

            if unused:
                return unused[0], -1
            if used:
                # Highest remaining quota first
                used.sort(key=lambda x: x[1], reverse=True)
                return used[0][0], used[0][1]
            return None, None

        # Snapshot: shallow copies so concurrent mutation of the pools
        # during selection does not change the iteration set.
        snapshot = {
            TokenType.NORMAL.value: self.token_data[TokenType.NORMAL.value].copy(),
            TokenType.SUPER.value: self.token_data[TokenType.SUPER.value].copy()
        }

        # Selection strategy
        if model == "grok-4-heavy":
            field = "heavyremainingQueries"
            token_key, remaining = select_best(snapshot[TokenType.SUPER.value], field)
        else:
            field = "remainingQueries"
            token_key, remaining = select_best(snapshot[TokenType.NORMAL.value], field)
            if token_key is None:
                token_key, remaining = select_best(snapshot[TokenType.SUPER.value], field)

        if token_key is None:
            raise GrokApiException(
                f"No available token: {model}",
                "NO_AVAILABLE_TOKEN",
                {
                    "model": model,
                    "normal": len(snapshot[TokenType.NORMAL.value]),
                    "super": len(snapshot[TokenType.SUPER.value]),
                    "cooldown_count": len(self._cooldown_counts)
                }
            )

        status = "unused" if remaining == -1 else f"{remaining} remaining"
        logger.debug(f"[Token] Assigned token: {model} ({status})")
        return token_key
|
| 381 |
+
|
| 382 |
+
async def check_limits(self, auth_token: str, model: str) -> Optional[Dict[str, Any]]:
    """Check rate limits.

    Queries Grok's /rest/rate-limits endpoint for the account behind
    *auth_token* and, on success, writes the remaining-quota numbers back
    into the token store via update_limits().

    Retry structure (two nested levels):
      - outer loop: retries statuses listed in ``retry_status_codes``
        (default 401/429) up to MAX_OUTER_RETRY times with progressive delay;
      - inner loop: retries 403 responses up to 5 times, forcing a fresh
        proxy from the pool each time (only when the proxy pool is enabled).

    Args:
        auth_token: Cookie-style token string (e.g. "sso-rw=...;sso=...").
        model: Public model name; mapped to a rate-limit model internally.

    Returns:
        The parsed rate-limit JSON on success, or None on any failure.
    """
    try:
        rate_model = Models.to_rate_limit(model)
        payload = {"requestKind": "DEFAULT", "modelName": rate_model}

        # Cookie carries the auth token plus the optional cf_clearance blob.
        cf = setting.grok_config.get("cf_clearance", "")
        headers = get_dynamic_headers("/rest/rate-limits")
        headers["Cookie"] = f"{auth_token};{cf}" if cf else auth_token

        # Outer retry: configurable status codes (401/429, etc)
        retry_codes = setting.grok_config.get("retry_status_codes", [401, 429])
        MAX_OUTER_RETRY = 3

        for outer_retry in range(MAX_OUTER_RETRY + 1):  # +1 ensures 3 retries
            # Inner retry: 403 with proxy pool
            max_403_retries = 5
            retry_403_count = 0

            while retry_403_count <= max_403_retries:
                # Fetch proxy asynchronously (proxy pool supported).
                # Imported here (not at module top) — presumably to avoid a
                # circular import; confirm before hoisting.
                from app.core.proxy_pool import proxy_pool

                # If retrying 403 with proxy pool, force refresh
                if retry_403_count > 0 and proxy_pool._enabled:
                    logger.info(f"[Token] 403 retry {retry_403_count}/{max_403_retries}, refreshing proxy...")
                    proxy = await proxy_pool.force_refresh()
                else:
                    proxy = await setting.get_proxy_async("service")

                proxies = {"http": proxy, "https": proxy} if proxy else None

                # One-shot session per attempt; curl_cffi impersonates a browser.
                async with AsyncSession() as session:
                    response = await session.post(
                        RATE_LIMIT_API,
                        headers=headers,
                        json=payload,
                        impersonate=BROWSER,
                        timeout=TIMEOUT,
                        proxies=proxies
                    )

                # Inner 403 retry: only when proxy pool is enabled
                if response.status_code == 403 and proxy_pool._enabled:
                    retry_403_count += 1

                    if retry_403_count <= max_403_retries:
                        logger.warning(f"[Token] 403 error, retrying ({retry_403_count}/{max_403_retries})...")
                        await asyncio.sleep(0.5)
                        continue

                    # All inner retries failed
                    logger.error(f"[Token] 403 error, retried {retry_403_count-1} times, giving up")
                    sso = self._extract_sso(auth_token)
                    if sso:
                        await self.record_failure(auth_token, 403, "Server blocked")
                    # NOTE(review): control falls through here; if 403 is not in
                    # retry_codes the "other errors" branch below records a
                    # second failure for the same response — confirm intended.

                # Check retryable status codes - outer retry
                if response.status_code in retry_codes:
                    if outer_retry < MAX_OUTER_RETRY:
                        delay = (outer_retry + 1) * 0.1  # Progressive delay: 0.1s, 0.2s, 0.3s
                        logger.warning(f"[Token] {response.status_code} error, outer retry ({outer_retry+1}/{MAX_OUTER_RETRY}), waiting {delay}s...")
                        await asyncio.sleep(delay)
                        break  # Exit inner loop for outer retry
                    else:
                        logger.error(f"[Token] {response.status_code} error, retried {outer_retry} times, giving up")
                        sso = self._extract_sso(auth_token)
                        if sso:
                            if response.status_code == 401:
                                await self.record_failure(auth_token, 401, "Token expired")
                            else:
                                await self.record_failure(auth_token, response.status_code, f"Error: {response.status_code}")
                        return None

                if response.status_code == 200:
                    data = response.json()
                    sso = self._extract_sso(auth_token)

                    if outer_retry > 0 or retry_403_count > 0:
                        logger.info("[Token] Retry succeeded")

                    if sso:
                        # NOTE(review): the heavy path reads "remainingQueries"
                        # while the normal path reads "remainingTokens" from the
                        # same endpoint — verify against the response schema.
                        if model == "grok-4-heavy":
                            await self.update_limits(sso, normal=None, heavy=data.get("remainingQueries", -1))
                            logger.info(f"[Token] Updated limits: {sso[:10]}..., heavy={data.get('remainingQueries', -1)}")
                        else:
                            await self.update_limits(sso, normal=data.get("remainingTokens", -1), heavy=None)
                            logger.info(f"[Token] Updated limits: {sso[:10]}..., basic={data.get('remainingTokens', -1)}")

                    return data
                else:
                    # Other errors
                    logger.warning(f"[Token] Failed to get limits: {response.status_code}")
                    sso = self._extract_sso(auth_token)
                    if sso:
                        await self.record_failure(auth_token, response.status_code, f"Error: {response.status_code}")
                    return None

    except Exception as e:
        logger.error(f"[Token] Limit check error: {e}")
        return None
|
| 483 |
+
|
| 484 |
+
async def update_limits(self, sso: str, normal: Optional[int] = None, heavy: Optional[int] = None) -> None:
    """Write fresh quota counters onto the stored record for *sso*.

    Searches the normal pool first, then the super pool; only the
    counters actually supplied are updated. Persistence is deferred via
    the dirty flag (batch save).
    """
    try:
        for pool in (TokenType.NORMAL.value, TokenType.SUPER.value):
            entry = self.token_data[pool].get(sso)
            if entry is None:
                continue
            if normal is not None:
                entry["remainingQueries"] = normal
            if heavy is not None:
                entry["heavyremainingQueries"] = heavy
            self._mark_dirty()  # Batch save
            logger.info(f"[Token] Updated limits: {sso[:10]}...")
            return
        logger.warning(f"[Token] Not found: {sso[:10]}...")
    except Exception as e:
        logger.error(f"[Token] Update limits error: {e}")
|
| 499 |
+
|
| 500 |
+
async def record_failure(self, auth_token: str, status: int, msg: str) -> None:
    """Bump the failure counter for the token behind *auth_token*.

    A statsig-invalid status is treated as an environment problem
    (blocked IP) rather than a token problem, so nothing is recorded.
    After MAX_FAILURES client-side (4xx) errors the token is flagged
    "expired".
    """
    try:
        if status == STATSIG_INVALID:
            logger.warning("[Token] IP blocked. Try: 1) change IP 2) use proxy 3) set CF clearance")
            return

        sso = self._extract_sso(auth_token)
        if not sso:
            return

        _, record = self._find_token(sso)
        if not record:
            logger.warning(f"[Token] Not found: {sso[:10]}...")
            return

        # Update failure bookkeeping (count, timestamp in ms, reason).
        failures = record.get("failedCount", 0) + 1
        record["failedCount"] = failures
        record["lastFailureTime"] = int(time.time() * 1000)
        record["lastFailureReason"] = f"{status}: {msg}"

        logger.warning(
            f"[Token] Failed: {sso[:10]}... (status: {status}), "
            f"count: {failures}/{MAX_FAILURES}, reason: {msg}"
        )

        # Only repeated client errors (4xx) expire a token permanently.
        if 400 <= status < 500 and failures >= MAX_FAILURES:
            record["status"] = "expired"
            logger.error(f"[Token] Marked expired: {sso[:10]}... ({status} errors x{failures})")

        self._mark_dirty()  # Batch save

    except Exception as e:
        logger.error(f"[Token] Record failure error: {e}")
|
| 533 |
+
|
| 534 |
+
async def reset_failure(self, auth_token: str) -> None:
    """Clear failure bookkeeping for the token after a successful call."""
    try:
        sso = self._extract_sso(auth_token)
        if not sso:
            return

        _, record = self._find_token(sso)
        if not record:
            return

        # Only touch the record (and the dirty flag) when there is
        # actually something to reset.
        if record.get("failedCount", 0) > 0:
            record["failedCount"] = 0
            record["lastFailureTime"] = None
            record["lastFailureReason"] = None
            self._mark_dirty()  # Batch save
            logger.info(f"[Token] Reset failure count: {sso[:10]}...")

    except Exception as e:
        logger.error(f"[Token] Reset failure error: {e}")
|
| 554 |
+
|
| 555 |
+
async def apply_cooldown(self, auth_token: str, status_code: int) -> None:
    """Apply cooldown policy
    - 429 errors: time-based cooldown (1 hour with quota, 10 hours without)
    - Other errors: request-based cooldown (5 requests)
    """
    try:
        sso = self._extract_sso(auth_token)
        if not sso:
            return

        _, record = self._find_token(sso)
        if not record:
            return

        remaining = record.get("remainingQueries", -1)
        # -1 is the "unused/unknown" marker used elsewhere in this manager.
        has_quota = remaining > 0 or remaining == -1

        if status_code == 429:
            # Time-based cooldown, persisted as epoch milliseconds.
            if has_quota:
                duration = COOLDOWN_429_WITH_QUOTA
                logger.info(f"[Token] 429 cooldown (with quota): {sso[:10]}... 1 hour")
            else:
                duration = COOLDOWN_429_NO_QUOTA
                logger.info(f"[Token] 429 cooldown (no quota): {sso[:10]}... 10 hours")
            record["cooldownUntil"] = int((time.time() + duration) * 1000)
            self._mark_dirty()
        elif remaining != 0:
            # Non-429 errors: skip this token for the next few selections,
            # but only while it still has quota.
            self._cooldown_counts[sso] = COOLDOWN_REQUESTS
            logger.info(f"[Token] Request cooldown: {sso[:10]}... {COOLDOWN_REQUESTS} requests")

    except Exception as e:
        logger.error(f"[Token] Apply cooldown error: {e}")
|
| 591 |
+
|
| 592 |
+
async def refresh_all_limits(self) -> Dict[str, Any]:
    """Refresh remaining counts for all tokens.

    Walks every (pool, sso) pair and calls check_limits() for each,
    sleeping briefly between calls. Progress is published through
    ``self._refresh_progress`` so the admin UI can poll it.

    Returns:
        {"success", "failed", "total"} summary on completion, or an
        {"error": "refresh_in_progress", ...} dict if a refresh is
        already running.
    """
    # Check if refresh is already running.
    # NOTE(review): _refresh_lock is a plain bool, not an asyncio.Lock.
    # There is no await between the check and the set below, so this is
    # safe on a single event loop, but not across threads — confirm.
    if self._refresh_lock:
        return {"error": "refresh_in_progress", "message": "A refresh task is already running", "progress": self._refresh_progress}

    # Acquire lock
    self._refresh_lock = True

    try:
        # Compute total — snapshot keys with list() so concurrent
        # mutation of token_data cannot break iteration.
        all_tokens = []
        for token_type in [TokenType.NORMAL.value, TokenType.SUPER.value]:
            for sso in list(self.token_data[token_type].keys()):
                all_tokens.append((token_type, sso))

        total = len(all_tokens)
        self._refresh_progress = {"running": True, "current": 0, "total": total, "success": 0, "failed": 0}

        success_count = 0
        fail_count = 0

        for i, (token_type, sso) in enumerate(all_tokens):
            # Rebuild the cookie-string form expected by check_limits().
            auth_token = f"sso-rw={sso};sso={sso}"
            try:
                result = await self.check_limits(auth_token, "grok-4-fast")
                if result:
                    success_count += 1
                else:
                    fail_count += 1
            except Exception as e:
                logger.warning(f"[Token] Refresh failed: {sso[:10]}... - {e}")
                fail_count += 1

            # Update progress (fresh dict each time so readers never see
            # a half-updated snapshot).
            self._refresh_progress = {
                "running": True,
                "current": i + 1,
                "total": total,
                "success": success_count,
                "failed": fail_count
            }
            await asyncio.sleep(0.1)  # Avoid flooding requests

        logger.info(f"[Token] Batch refresh completed: success {success_count}, failed {fail_count}")
        self._refresh_progress = {"running": False, "current": total, "total": total, "success": success_count, "failed": fail_count}
        return {"success": success_count, "failed": fail_count, "total": total}

    finally:
        # Always release the flag, even if the refresh loop raised.
        self._refresh_lock = False
|
| 642 |
+
|
| 643 |
+
def get_refresh_progress(self) -> Dict[str, Any]:
    """Return a shallow snapshot of the batch-refresh progress dict."""
    snapshot = dict(self._refresh_progress)
    return snapshot
|
| 646 |
+
|
| 647 |
+
|
| 648 |
+
# Global instance
# Module-level singleton; all importers share this one manager.
token_manager = GrokTokenManager()
|
app/services/grok/upload.py
ADDED
|
@@ -0,0 +1,250 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Image upload manager - supports Base64 and URL image uploads"""
|
| 2 |
+
|
| 3 |
+
import asyncio
|
| 4 |
+
import base64
|
| 5 |
+
import re
|
| 6 |
+
import time
|
| 7 |
+
import os
|
| 8 |
+
from pathlib import Path
|
| 9 |
+
from typing import Tuple, Optional
|
| 10 |
+
from urllib.parse import urlparse
|
| 11 |
+
from curl_cffi.requests import AsyncSession
|
| 12 |
+
|
| 13 |
+
from app.services.grok.statsig import get_dynamic_headers
|
| 14 |
+
from app.services.images.normalize import normalize_tmpfiles_url
|
| 15 |
+
from app.core.exception import GrokApiException
|
| 16 |
+
from app.core.config import setting
|
| 17 |
+
from app.core.logger import logger
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
# Constants
UPLOAD_API = "https://grok.com/rest/app-chat/upload-file"  # Grok file-upload endpoint
TIMEOUT = 30  # request timeout, seconds
BROWSER = "chrome133a"  # curl_cffi browser impersonation profile

# MIME types
# Extension -> MIME map for upload metadata.
# NOTE(review): not referenced by the visible code in this module — confirm
# whether it is used elsewhere before removing.
MIME_TYPES = {
    '.jpg': 'image/jpeg', '.jpeg': 'image/jpeg', '.png': 'image/png',
    '.gif': 'image/gif', '.webp': 'image/webp', '.bmp': 'image/bmp',
}
DEFAULT_MIME = "image/jpeg"  # fallback when MIME cannot be determined
DEFAULT_EXT = "jpg"          # fallback file extension
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
class ImageUploadManager:
    """Image upload manager.

    Accepts either a Base64 payload (optionally a data: URI) or an
    http(s) URL, and pushes the image to Grok's upload-file endpoint
    with the same two-level retry scheme used elsewhere in this service
    (outer retry on configurable status codes, inner retry on 403 via
    the proxy pool).
    """

    @staticmethod
    def _resolve_temp_dir() -> str:
        """Resolve temp directory for downloads.

        Priority: $DATA_DIR, then /data (container layout), then the
        repo-local ./data directory three levels above this file.
        """
        data_dir_env = os.getenv("DATA_DIR")
        if data_dir_env:
            base_dir = data_dir_env
        elif os.path.isdir("/data"):
            base_dir = "/data"
        else:
            base_dir = str(Path(__file__).parents[3] / "data")
        return os.path.join(base_dir, "temp", "image")

    @staticmethod
    async def upload(image_input: str, auth_token: str, statsig_id: str = "") -> Tuple[str, str]:
        """Upload image (Base64 or URL)

        Args:
            image_input: Base64 string, data: URI, or http(s) URL.
            auth_token: Cookie-style auth token; required.
            statsig_id: Optional statsig id forwarded into the headers.

        Returns:
            (file_id, file_uri) tuple; ("", "") on any non-exception failure.

        Raises:
            GrokApiException: on download failure or missing auth token.
        """
        try:
            # Detect type and handle
            if ImageUploadManager._is_url(image_input):
                buffer, mime = await ImageUploadManager._download(normalize_tmpfiles_url(image_input))
                if not buffer:
                    raise GrokApiException("Image download failed", "IMAGE_DOWNLOAD_FAILED")
                filename, _ = ImageUploadManager._get_info("", mime)
            else:
                # Strip the "data:image/...;base64," prefix if present.
                buffer = image_input.split(",")[1] if "data:image" in image_input else image_input
                filename, mime = ImageUploadManager._get_info(image_input)

            # Build data
            data = {
                "fileName": filename,
                "fileMimeType": mime,
                "content": buffer,
            }


            if not auth_token:
                raise GrokApiException("Missing authentication token", "NO_AUTH_TOKEN")

            # Outer retry: configurable status codes (401/429, etc)
            retry_codes = setting.grok_config.get("retry_status_codes", [401, 429])
            MAX_OUTER_RETRY = 3

            for outer_retry in range(MAX_OUTER_RETRY + 1):  # +1 ensures 3 retries
                try:
                    # Inner retry: 403 with proxy pool
                    max_403_retries = 5
                    retry_403_count = 0

                    while retry_403_count <= max_403_retries:
                        # Request config — rebuilt each attempt so retries pick
                        # up fresh dynamic headers.
                        cf = setting.grok_config.get("cf_clearance", "")
                        headers = {
                            **get_dynamic_headers("/rest/app-chat/upload-file", statsig_id=statsig_id),
                            "Cookie": f"{auth_token};{cf}" if cf else auth_token,
                        }

                        # Fetch proxy asynchronously (proxy pool supported).
                        # Local import — presumably avoids a circular import.
                        from app.core.proxy_pool import proxy_pool

                        # If retrying 403 with proxy pool, force refresh
                        if retry_403_count > 0 and proxy_pool._enabled:
                            logger.info(f"[Upload] 403 retry {retry_403_count}/{max_403_retries}, refreshing proxy...")
                            proxy = await proxy_pool.force_refresh()
                        else:
                            proxy = await setting.get_proxy_async("service")

                        proxies = {"http": proxy, "https": proxy} if proxy else None
                        logger.info(f"[Upload] Proxy: {proxy[:60]}..." if proxy else "[Upload] Proxy: None")

                        # Upload
                        async with AsyncSession() as session:
                            response = await session.post(
                                UPLOAD_API,
                                headers=headers,
                                json=data,
                                impersonate=BROWSER,
                                timeout=TIMEOUT,
                                proxies=proxies,
                            )

                        # Inner 403 retry: only when proxy pool is enabled
                        if response.status_code == 403 and proxy_pool._enabled:
                            retry_403_count += 1

                            if retry_403_count <= max_403_retries:
                                logger.warning(f"[Upload] 403 error, retrying ({retry_403_count}/{max_403_retries})...")
                                await asyncio.sleep(0.5)
                                continue

                            # All inner retries failed
                            logger.error(f"[Upload] 403 error, retried {retry_403_count-1} times, giving up")

                        # Check retryable status codes - outer retry
                        if response.status_code in retry_codes:
                            if outer_retry < MAX_OUTER_RETRY:
                                delay = (outer_retry + 1) * 0.1  # Progressive delay: 0.1s, 0.2s, 0.3s
                                logger.warning(f"[Upload] {response.status_code} error, outer retry ({outer_retry+1}/{MAX_OUTER_RETRY}), waiting {delay}s...")
                                await asyncio.sleep(delay)
                                break  # Exit inner loop for outer retry
                            else:
                                logger.error(f"[Upload] {response.status_code} error, retried {outer_retry} times, giving up")
                                return "", ""

                        if response.status_code == 200:
                            result = response.json()
                            file_id = result.get("fileMetadataId", "")
                            file_uri = result.get("fileUri", "")

                            if outer_retry > 0 or retry_403_count > 0:
                                logger.info("[Upload] Retry succeeded")

                            logger.debug(f"[Upload] Success, ID: {file_id}")
                            return file_id, file_uri

                        # Other errors
                        logger.error(f"[Upload] Failed, status code: {response.status_code}, body: {response.text[:500]}")
                        return "", ""

                    # Inner loop finished without break: 403 retries exhausted
                    return "", ""

                except Exception as e:
                    # NOTE(review): this bound is MAX_OUTER_RETRY - 1 while the
                    # status-code path above uses MAX_OUTER_RETRY, so exceptions
                    # get one fewer retry — confirm whether intentional.
                    if outer_retry < MAX_OUTER_RETRY - 1:
                        logger.warning(f"[Upload] Error: {e}, outer retry ({outer_retry+1}/{MAX_OUTER_RETRY})...")
                        await asyncio.sleep(0.5)
                        continue

                    logger.warning(f"[Upload] Failed: {e}")
                    return "", ""

            return "", ""

        except GrokApiException:
            # Deliberate failures (bad input, download error) propagate.
            raise
        except Exception as e:
            logger.warning(f"[Upload] Failed: {e}")
            return "", ""

    @staticmethod
    def _is_url(input_str: str) -> bool:
        """Check if URL (http/https with a host)."""
        try:
            result = urlparse(input_str)
            return all([result.scheme, result.netloc]) and result.scheme in ['http', 'https']
        except:
            # Bare except: any parse error simply means "not a URL".
            return False

    @staticmethod
    async def _download(url: str) -> Tuple[str, str]:
        """Download image and convert to Base64

        Enforces a content-type check and a size cap (header and actual
        body) from global config, and writes a copy to the temp dir.

        Returns:
            (base64_string, mime_type) tuple; ("", "") on generic failure.

        Raises:
            GrokApiException: for non-image content or oversized images.
        """
        try:
            timeout = setting.global_config.get("image_download_timeout", 30)
            max_mb = setting.global_config.get("image_download_max_size_mb", 20)
            max_bytes = int(max_mb * 1024 * 1024)
            logger.debug(f"[Upload] Download start: {url}")
            async with AsyncSession() as session:
                response = await session.get(url, timeout=timeout)
                response.raise_for_status()

                content_type = response.headers.get('content-type', DEFAULT_MIME)
                if not content_type.startswith('image/'):
                    raise GrokApiException("Unsupported image MIME type", "UNSUPPORTED_IMAGE_TYPE")

                # Cheap header check first, then verify the actual body size.
                content_length = response.headers.get("content-length")
                if content_length and int(content_length) > max_bytes:
                    raise GrokApiException("Image too large", "IMAGE_TOO_LARGE")

                if len(response.content) > max_bytes:
                    raise GrokApiException("Image too large", "IMAGE_TOO_LARGE")

                # NOTE(review): the file written here is never deleted in this
                # module — presumably cleaned up elsewhere; confirm.
                temp_dir = ImageUploadManager._resolve_temp_dir()
                os.makedirs(temp_dir, exist_ok=True)
                file_path = os.path.join(temp_dir, f"upload-{int(time.time() * 1000)}.img")
                with open(file_path, "wb") as f:
                    f.write(response.content)

                b64 = base64.b64encode(response.content).decode()
                logger.debug(f"[Upload] Download success: {url}")
                return b64, content_type
        except GrokApiException:
            raise
        except Exception as e:
            logger.warning(f"[Upload] Download failed: {e}")
            return "", ""

    @staticmethod
    def _get_info(image_data: str, mime_type: Optional[str] = None) -> Tuple[str, str]:
        """Get filename and MIME type

        Args:
            image_data: Base64 payload or data: URI (may be empty).
            mime_type: Known MIME type; when given, image_data is ignored.

        Returns:
            (file_name, mime_type) tuple
        """
        # MIME type provided
        if mime_type:
            ext = mime_type.split("/")[1] if "/" in mime_type else DEFAULT_EXT
            return f"image.{ext}", mime_type

        # Extract from Base64
        mime = DEFAULT_MIME
        ext = DEFAULT_EXT

        if "data:image" in image_data:
            if match := re.search(r"data:([a-zA-Z0-9]+/[a-zA-Z0-9-.+]+);base64,", image_data):
                mime = match.group(1)
                ext = mime.split("/")[1]

        return f"image.{ext}", mime
|
app/services/images/normalize.py
ADDED
|
@@ -0,0 +1,100 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Multimodal message normalization utilities."""
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
import re
|
| 6 |
+
from typing import Any, Dict, List, Optional
|
| 7 |
+
|
| 8 |
+
from app.core.exception import GrokApiException
|
| 9 |
+
from app.core.logger import logger
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
_TMPFILES_RE = re.compile(r"^https?://tmpfiles\.org/(\d+)/(.+)$")


def normalize_tmpfiles_url(url: str) -> str:
    """Normalize tmpfiles.org URLs to direct download links."""
    if not url:
        return url
    cleaned = url.strip()
    m = _TMPFILES_RE.match(cleaned)
    if m is None:
        # Not a tmpfiles landing page — pass through (stripped) unchanged.
        return cleaned
    file_id, name = m.group(1), m.group(2)
    return f"https://tmpfiles.org/dl/{file_id}/{name}"
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
def _normalize_image_url(image_url: Any) -> Optional[str]:
    """Pull a URL out of a string or {"url": ...} dict and normalize it."""
    if isinstance(image_url, str):
        return normalize_tmpfiles_url(image_url)
    if isinstance(image_url, dict):
        raw = image_url.get("url")
        if raw:
            return normalize_tmpfiles_url(raw)
        return None
    # Anything else (None, numbers, lists) is unusable.
    return None
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
def _normalize_content_part(part: Any) -> Dict[str, Any]:
    """Validate one OpenAI-style content part and return its canonical form."""
    if not isinstance(part, dict):
        raise GrokApiException("Invalid content part: expected object", "INVALID_MULTIMODAL")

    kind = part.get("type")

    if kind == "text":
        text_value = part.get("text", "")
        return {"type": "text", "text": str(text_value)}

    if kind == "image_url":
        # Accept both the nested OpenAI shape and a flat "url" key.
        candidate = part.get("image_url") or part.get("url")
        normalized_url = _normalize_image_url(candidate)
        if not normalized_url:
            raise GrokApiException("Invalid image_url content part", "INVALID_MULTIMODAL")
        return {"type": "image_url", "image_url": {"url": normalized_url}}

    raise GrokApiException(f"Unsupported content type '{kind}'", "INVALID_MULTIMODAL")
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
def normalize_messages(
    messages: List[Dict[str, Any]],
    image_url: Optional[str] = None,
    image_urls: Optional[List[str]] = None,
) -> List[Dict[str, Any]]:
    """Normalize messages to OpenAI-compatible multimodal format.

    Every message's content becomes a list of typed parts; extra image
    URLs supplied out-of-band are appended to the latest user message.
    """
    if not messages:
        raise GrokApiException("Message list cannot be empty", "INVALID_MULTIMODAL")

    normalized: List[Dict[str, Any]] = []
    for raw in messages:
        if not isinstance(raw, dict):
            raise GrokApiException("Each message must be an object", "INVALID_MULTIMODAL")

        role = raw.get("role")
        if role not in ("system", "user", "assistant"):
            raise GrokApiException(f"Invalid role '{role}'", "INVALID_MULTIMODAL")

        content = raw.get("content", "")
        if isinstance(content, list):
            parts = [_normalize_content_part(item) for item in content]
        else:
            # Plain string (or other scalar) content becomes one text part.
            parts = [{"type": "text", "text": str(content)}]

        normalized.append({"role": role, "content": parts})

    # Collect any extra image URLs passed alongside the messages.
    extra_urls: List[str] = []
    if image_url:
        extra_urls.append(normalize_tmpfiles_url(image_url))
    if image_urls:
        extra_urls.extend(normalize_tmpfiles_url(u) for u in image_urls if u)

    if extra_urls:
        # Attach to the most recent user message.
        target = next((m for m in reversed(normalized) if m["role"] == "user"), None)
        if target is None:
            raise GrokApiException("No user message to attach images", "INVALID_MULTIMODAL")
        target["content"].extend(
            {"type": "image_url", "image_url": {"url": u}} for u in extra_urls
        )

    summary = [
        {
            "role": m["role"],
            "parts": [part.get("type") for part in m.get("content", [])],
        }
        for m in normalized
    ]
    logger.debug(f"[Normalize] Messages: {summary}")
    return normalized
|
app/services/mcp/__init__.py
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
"""MCP module initialization"""
|
| 3 |
+
|
| 4 |
+
from app.services.mcp.server import mcp
|
| 5 |
+
|
| 6 |
+
__all__ = ["mcp"]
|
app/services/mcp/server.py
ADDED
|
@@ -0,0 +1,63 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
"""FastMCP server instance"""
|
| 3 |
+
|
| 4 |
+
from typing import Optional

from fastmcp import FastMCP
from fastmcp.server.auth.providers.jwt import StaticTokenVerifier

from app.services.mcp.tools import ask_grok_impl
from app.core.config import setting
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
def create_mcp_server() -> FastMCP:
    """Create MCP server instance, enable auth if API key is configured"""
    # Check if API key is configured
    configured_key = setting.grok_config.get("api_key")

    verifier = None
    if configured_key:
        # Gate every request behind the single configured bearer token.
        token_table = {
            configured_key: {
                "client_id": "grok2api-client",
                "scopes": ["read", "write", "admin"],
            }
        }
        verifier = StaticTokenVerifier(tokens=token_table, required_scopes=["read"])

    # Create FastMCP instance
    return FastMCP(
        name="Grok2API-MCP",
        instructions="MCP server providing Grok AI chat capabilities. Use ask_grok tool to interact with Grok AI models.",
        auth=verifier,
    )
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
# Create global MCP instance
# Built once at import time; tool registrations below attach to it.
mcp = create_mcp_server()
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
# Register ask_grok tool
|
| 41 |
+
@mcp.tool
|
| 42 |
+
async def ask_grok(
|
| 43 |
+
query: str,
|
| 44 |
+
model: str = "grok-3-fast",
|
| 45 |
+
system_prompt: str = None
|
| 46 |
+
) -> str:
|
| 47 |
+
"""
|
| 48 |
+
Call Grok AI for conversation, especially when users need the latest info, search, or social updates (Twitter/X, Reddit, etc).
|
| 49 |
+
|
| 50 |
+
Args:
|
| 51 |
+
query: User question or instruction
|
| 52 |
+
model: Grok model name, options: grok-3-fast (default), grok-4-fast, grok-4-fast-expert, grok-4-expert, grok-4-heavy
|
| 53 |
+
system_prompt: Optional system prompt to set AI role or constraints
|
| 54 |
+
|
| 55 |
+
Returns:
|
| 56 |
+
Full Grok AI response, may include text and image links (Markdown)
|
| 57 |
+
|
| 58 |
+
Examples:
|
| 59 |
+
- Simple Q&A: ask_grok("What is Python?")
|
| 60 |
+
- Specify model: ask_grok("Explain quantum computing", model="grok-4-fast")
|
| 61 |
+
- With system prompt: ask_grok("Write a poem", system_prompt="You are a classical poet")
|
| 62 |
+
"""
|
| 63 |
+
return await ask_grok_impl(query, model, system_prompt)
|
app/services/mcp/tools.py
ADDED
|
@@ -0,0 +1,77 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
"""MCP Tools - Grok AI chat tool"""
|
| 3 |
+
|
| 4 |
+
import json
|
| 5 |
+
from typing import Optional
|
| 6 |
+
from app.services.grok.client import GrokClient
|
| 7 |
+
from app.core.logger import logger
|
| 8 |
+
from app.core.exception import GrokApiException
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
async def ask_grok_impl(
    query: str,
    model: str = "grok-3-fast",
    system_prompt: Optional[str] = None
) -> str:
    """
    Internal implementation: call Grok API and collect the full response.

    Args:
        query: User question
        model: Model name
        system_prompt: Optional system prompt

    Returns:
        str: Full Grok response content

    Raises:
        Exception: When the Grok API call or stream processing fails
                   (original exception attached via ``__cause__``).
    """
    try:
        # Build OpenAI-style message list
        messages = []
        if system_prompt:
            messages.append({"role": "system", "content": system_prompt})
        messages.append({"role": "user", "content": query})

        # Build request (streaming so we can aggregate deltas below)
        request_data = {
            "model": model,
            "messages": messages,
            "stream": True
        }

        logger.info(f"[MCP] ask_grok called, model: {model}")

        # Call Grok client (streaming)
        response_iterator = await GrokClient.openai_to_grok(request_data)

        # Collect all streaming chunks. A single chunk may carry several
        # SSE "data: ..." events, so parse it line by line instead of
        # testing only the chunk prefix.
        content_parts = []
        done = False
        async for chunk in response_iterator:
            if isinstance(chunk, bytes):
                chunk = chunk.decode('utf-8')

            for line in chunk.splitlines():
                if not line.startswith("data: "):
                    continue
                data_str = line[6:].strip()
                if data_str == "[DONE]":
                    done = True
                    break

                try:
                    data = json.loads(data_str)
                except json.JSONDecodeError:
                    # Skip keep-alive / partial fragments
                    continue

                choices = data.get("choices", [])
                if choices:
                    delta = choices[0].get("delta", {})
                    if content := delta.get("content"):
                        content_parts.append(content)

            if done:
                # Stop consuming the stream once the terminator arrives
                break

        result = "".join(content_parts)
        logger.info(f"[MCP] ask_grok completed, response length: {len(result)}")
        return result

    except GrokApiException as e:
        logger.error(f"[MCP] Grok API error: {str(e)}")
        # Chain the cause so MCP-side logs keep the upstream traceback
        raise Exception(f"Grok API call failed: {str(e)}") from e
    except Exception as e:
        logger.error(f"[MCP] ask_grok error: {str(e)}", exc_info=True)
        raise Exception(f"Error processing request: {str(e)}") from e
|
app/services/request_logger.py
ADDED
|
@@ -0,0 +1,152 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Request log audit - record recent requests"""
|
| 2 |
+
|
| 3 |
+
import os
|
| 4 |
+
import time
|
| 5 |
+
import asyncio
|
| 6 |
+
import orjson
|
| 7 |
+
from typing import List, Dict, Deque
|
| 8 |
+
from collections import deque
|
| 9 |
+
from dataclasses import dataclass, asdict
|
| 10 |
+
from pathlib import Path
|
| 11 |
+
|
| 12 |
+
from app.core.logger import logger
|
| 13 |
+
|
| 14 |
+
@dataclass
class RequestLog:
    """One audited API request (a row in the recent-requests log).

    NOTE(review): mirrors the plain dict built in ``RequestLogger.add_log``;
    the logger currently stores dicts rather than instances of this class.
    """
    id: str            # unique id — derived from the millisecond timestamp in add_log
    time: str          # human-readable local time ("%Y-%m-%d %H:%M:%S")
    timestamp: float   # UNIX epoch seconds of the request
    ip: str            # client IP address
    model: str         # requested model name
    duration: float    # request duration in seconds (rounded to 2 decimals in add_log)
    status: int        # HTTP status code returned to the client
    key_name: str      # name of the API key used for the request
    token_suffix: str  # suffix of the upstream token used (may be empty)
    error: str = ""    # error description; empty string on success
|
| 26 |
+
|
| 27 |
+
class RequestLogger:
    """Request logger (singleton).

    Keeps the newest ``max_len`` request records in memory (newest first)
    and mirrors them to ``<data_dir>/logs.json`` after every change.
    """

    _instance = None

    def __new__(cls):
        # Classic singleton: every RequestLogger() yields the same object.
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        return cls._instance

    def __init__(self, max_len: int = 1000):
        # Singleton guard: run the body only once.
        if hasattr(self, '_initialized'):
            return

        # Data dir resolution: DATA_DIR env var wins, then the persistent
        # /data volume (Hugging Face Spaces), then the repo-local ./data.
        data_dir_env = os.getenv("DATA_DIR")
        if data_dir_env:
            data_dir = Path(data_dir_env)
        elif Path("/data").exists():
            data_dir = Path("/data")
        else:
            data_dir = Path(__file__).parents[2] / "data"

        self.file_path = data_dir / "logs.json"
        self._logs: Deque[Dict] = deque(maxlen=max_len)
        self._lock = asyncio.Lock()
        self._loaded = False
        # Strong references to in-flight background save tasks. Without
        # them the event loop may garbage-collect a fire-and-forget task
        # before it finishes (documented asyncio.create_task pitfall).
        self._save_tasks: set = set()

        self._initialized = True

    async def init(self):
        """Initialize and load data"""
        if not self._loaded:
            await self._load_data()

    async def _load_data(self):
        """Load log data from disk (idempotent; tolerates a missing file)."""
        if self._loaded:
            return

        if not self.file_path.exists():
            self._loaded = True
            return

        try:
            async with self._lock:
                content = await asyncio.to_thread(self.file_path.read_bytes)
                if content:
                    data = orjson.loads(content)
                    if isinstance(data, list):
                        self._logs.clear()
                        self._logs.extend(data)
            self._loaded = True
            logger.debug(f"[Logger] Logs loaded: {len(self._logs)} entries")
        except Exception as e:
            logger.error(f"[Logger] Failed to load logs: {e}")
            # Mark loaded anyway so a broken file does not block logging.
            self._loaded = True

    async def _save_data(self):
        """Save log data to disk"""
        if not self._loaded:
            return

        try:
            # Ensure directory exists
            self.file_path.parent.mkdir(parents=True, exist_ok=True)

            async with self._lock:
                # Persist as a plain JSON list
                content = orjson.dumps(list(self._logs))
                await asyncio.to_thread(self.file_path.write_bytes, content)
        except Exception as e:
            logger.error(f"[Logger] Failed to save logs: {e}")

    def _schedule_save(self):
        """Fire a background save while holding a strong task reference."""
        task = asyncio.create_task(self._save_data())
        self._save_tasks.add(task)
        task.add_done_callback(self._save_tasks.discard)

    async def add_log(self,
                      ip: str,
                      model: str,
                      duration: float,
                      status: int,
                      key_name: str,
                      token_suffix: str = "",
                      error: str = ""):
        """Add log entry"""
        if not self._loaded:
            await self.init()

        try:
            now = time.time()
            # Human-readable timestamp for the admin UI
            time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(now))

            log = {
                "id": str(int(now * 1000)),
                "time": time_str,
                "timestamp": now,
                "ip": ip,
                "model": model,
                "duration": round(duration, 2),
                "status": status,
                "key_name": key_name,
                "token_suffix": token_suffix,
                "error": error
            }

            async with self._lock:
                self._logs.appendleft(log)  # Newest first

            # Save asynchronously (reference kept — see _schedule_save)
            self._schedule_save()

        except Exception as e:
            logger.error(f"[Logger] Failed to record log: {e}")

    async def get_logs(self, limit: int = 1000) -> List[Dict]:
        """Get logs"""
        async with self._lock:
            return list(self._logs)[:limit]

    async def clear_logs(self):
        """Clear logs"""
        async with self._lock:
            self._logs.clear()
        await self._save_data()
|
| 149 |
+
|
| 150 |
+
|
| 151 |
+
# Global instance
|
| 152 |
+
request_logger = RequestLogger()
|
app/services/request_stats.py
ADDED
|
@@ -0,0 +1,205 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Request stats module - hourly/daily request statistics"""
|
| 2 |
+
|
| 3 |
+
import time
|
| 4 |
+
import asyncio
|
| 5 |
+
import orjson
|
| 6 |
+
from datetime import datetime
|
| 7 |
+
from typing import Dict, Any
|
| 8 |
+
from pathlib import Path
|
| 9 |
+
from collections import defaultdict
|
| 10 |
+
|
| 11 |
+
from app.core.logger import logger
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class RequestStats:
    """Request stats manager (singleton): hourly/daily counters + per-model usage."""

    _instance = None

    def __new__(cls):
        # Classic singleton: every RequestStats() yields the same object.
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        return cls._instance

    def __init__(self):
        # Singleton guard: run the body only once.
        if hasattr(self, '_initialized'):
            return

        import os  # local import keeps the module's top-level deps unchanged

        # Resolve the data dir the same way RequestLogger does so stats land
        # on the persistent volume (DATA_DIR / /data) when one is mounted,
        # instead of always in the repo-local ./data.
        data_dir_env = os.getenv("DATA_DIR")
        if data_dir_env:
            data_dir = Path(data_dir_env)
        elif Path("/data").exists():
            data_dir = Path("/data")
        else:
            data_dir = Path(__file__).parents[2] / "data"
        self.file_path = data_dir / "stats.json"

        # Stats data: keys are "%Y-%m-%dT%H" / "%Y-%m-%d" bucket labels
        self._hourly: Dict[str, Dict[str, int]] = defaultdict(lambda: {"total": 0, "success": 0, "failed": 0})
        self._daily: Dict[str, Dict[str, int]] = defaultdict(lambda: {"total": 0, "success": 0, "failed": 0})
        self._models: Dict[str, int] = defaultdict(int)

        # Retention policy
        self._hourly_keep = 48  # Keep 48 hours
        self._daily_keep = 30   # Keep 30 days

        self._lock = asyncio.Lock()
        self._loaded = False
        # Strong refs to in-flight background save tasks (prevents GC of
        # fire-and-forget tasks — documented asyncio.create_task pitfall).
        self._save_tasks: set = set()
        self._initialized = True

    async def init(self):
        """Initialize and load data"""
        if not self._loaded:
            await self._load_data()

    async def _load_data(self):
        """Load stats data from disk (idempotent; tolerates a missing file)."""
        if self._loaded:
            return

        if not self.file_path.exists():
            self._loaded = True
            return

        try:
            async with self._lock:
                content = await asyncio.to_thread(self.file_path.read_bytes)
                if content:
                    data = orjson.loads(content)

                    # Restore defaultdict structure
                    self._hourly = defaultdict(lambda: {"total": 0, "success": 0, "failed": 0})
                    self._hourly.update(data.get("hourly", {}))

                    self._daily = defaultdict(lambda: {"total": 0, "success": 0, "failed": 0})
                    self._daily.update(data.get("daily", {}))

                    self._models = defaultdict(int)
                    self._models.update(data.get("models", {}))

            self._loaded = True
            logger.debug("[Stats] Stats data loaded successfully")
        except Exception as e:
            logger.error(f"[Stats] Failed to load data: {e}")
            self._loaded = True  # Prevent overwrite

    async def _save_data(self):
        """Save stats data to disk"""
        if not self._loaded:
            return

        try:
            # Ensure directory exists
            self.file_path.parent.mkdir(parents=True, exist_ok=True)

            async with self._lock:
                data = {
                    "hourly": dict(self._hourly),
                    "daily": dict(self._daily),
                    "models": dict(self._models)
                }
                content = orjson.dumps(data)
                await asyncio.to_thread(self.file_path.write_bytes, content)
        except Exception as e:
            logger.error(f"[Stats] Failed to save data: {e}")

    async def record_request(self, model: str, success: bool) -> None:
        """Record a request"""
        if not self._loaded:
            await self.init()

        now = datetime.now()
        hour_key = now.strftime("%Y-%m-%dT%H")
        day_key = now.strftime("%Y-%m-%d")
        outcome = "success" if success else "failed"

        # Hourly stats
        self._hourly[hour_key]["total"] += 1
        self._hourly[hour_key][outcome] += 1

        # Daily stats
        self._daily[day_key]["total"] += 1
        self._daily[day_key][outcome] += 1

        # Model stats
        self._models[model] += 1

        # Periodically clean old data
        self._cleanup()

        # Save asynchronously, keeping a strong task reference
        task = asyncio.create_task(self._save_data())
        self._save_tasks.add(task)
        task.add_done_callback(self._save_tasks.discard)

    def _cleanup(self) -> None:
        """Clean expired data beyond the retention windows."""
        # Clean hourly data (keys sort chronologically, so drop the oldest)
        hour_keys = list(self._hourly.keys())
        if len(hour_keys) > self._hourly_keep:
            for key in sorted(hour_keys)[:-self._hourly_keep]:
                del self._hourly[key]

        # Clean daily data
        day_keys = list(self._daily.keys())
        if len(day_keys) > self._daily_keep:
            for key in sorted(day_keys)[:-self._daily_keep]:
                del self._daily[key]

    def get_stats(self, hours: int = 24, days: int = 7) -> Dict[str, Any]:
        """Get stats data for the dashboard."""
        from datetime import timedelta  # hoisted out of the loops below

        now = datetime.now()

        # Last N hours, oldest first
        hourly_data = []
        for i in range(hours - 1, -1, -1):
            dt = now - timedelta(hours=i)
            key = dt.strftime("%Y-%m-%dT%H")
            data = self._hourly.get(key, {"total": 0, "success": 0, "failed": 0})
            hourly_data.append({
                "hour": dt.strftime("%H:00"),
                "date": dt.strftime("%m-%d"),
                **data
            })

        # Last N days, oldest first
        daily_data = []
        for i in range(days - 1, -1, -1):
            dt = now - timedelta(days=i)
            key = dt.strftime("%Y-%m-%d")
            data = self._daily.get(key, {"total": 0, "success": 0, "failed": 0})
            daily_data.append({
                "date": dt.strftime("%m-%d"),
                **data
            })

        # Model stats (Top 10)
        model_data = sorted(self._models.items(), key=lambda x: x[1], reverse=True)[:10]

        # Totals — NOTE: computed over the retained hourly window (~48h),
        # not all-time.
        total_requests = sum(d["total"] for d in self._hourly.values())
        total_success = sum(d["success"] for d in self._hourly.values())
        total_failed = sum(d["failed"] for d in self._hourly.values())

        return {
            "hourly": hourly_data,
            "daily": daily_data,
            "models": [{"model": m, "count": c} for m, c in model_data],
            "summary": {
                "total": total_requests,
                "success": total_success,
                "failed": total_failed,
                "success_rate": round(total_success / total_requests * 100, 1) if total_requests > 0 else 0
            }
        }

    async def reset(self) -> None:
        """Reset all stats"""
        self._hourly.clear()
        self._daily.clear()
        self._models.clear()
        await self._save_data()
|
| 202 |
+
|
| 203 |
+
|
| 204 |
+
# Global instance
|
| 205 |
+
request_stats = RequestStats()
|
app/template/admin.html
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
app/template/favicon.png
ADDED
|
|
Git LFS Details
|
app/template/login.html
ADDED
|
@@ -0,0 +1,76 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<!DOCTYPE html>
|
| 2 |
+
<html lang="en" class="h-full">
|
| 3 |
+
|
| 4 |
+
<head>
|
| 5 |
+
<meta charset="UTF-8">
|
| 6 |
+
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
| 7 |
+
<title>Sign In - Grok2API</title>
|
| 8 |
+
<link rel="icon" type="image/png" href="/static/favicon.png">
|
| 9 |
+
<script src="https://cdn.tailwindcss.com"></script>
|
| 10 |
+
<script>
|
| 11 |
+
tailwind.config = { theme: { extend: { colors: { border: "hsl(0 0% 89%)", input: "hsl(0 0% 89%)", ring: "hsl(0 0% 3.9%)", background: "hsl(0 0% 100%)", foreground: "hsl(0 0% 3.9%)", primary: { DEFAULT: "hsl(0 0% 9%)", foreground: "hsl(0 0% 98%)" }, secondary: { DEFAULT: "hsl(0 0% 96.1%)", foreground: "hsl(0 0% 9%)" }, muted: { DEFAULT: "hsl(0 0% 96.1%)", foreground: "hsl(0 0% 45.1%)" }, destructive: { DEFAULT: "hsl(0 84.2% 60.2%)", foreground: "hsl(0 0% 98%)" } } } } }
|
| 12 |
+
</script>
|
| 13 |
+
<style>
|
| 14 |
+
@keyframes slide-up {
|
| 15 |
+
from {
|
| 16 |
+
transform: translateY(100%);
|
| 17 |
+
opacity: 0
|
| 18 |
+
}
|
| 19 |
+
|
| 20 |
+
to {
|
| 21 |
+
transform: translateY(0);
|
| 22 |
+
opacity: 1
|
| 23 |
+
}
|
| 24 |
+
}
|
| 25 |
+
|
| 26 |
+
.animate-slide-up {
|
| 27 |
+
animation: slide-up .3s ease-out
|
| 28 |
+
}
|
| 29 |
+
</style>
|
| 30 |
+
</head>
|
| 31 |
+
|
| 32 |
+
<body class="h-full bg-background text-foreground antialiased">
|
| 33 |
+
<div class="flex min-h-full flex-col justify-center py-12 px-4 sm:px-6 lg:px-8">
|
| 34 |
+
<div class="sm:mx-auto sm:w-full sm:max-w-md">
|
| 35 |
+
<div class="text-center">
|
| 36 |
+
<h1 class="text-4xl font-bold">Grok2API</h1>
|
| 37 |
+
<p class="mt-2 text-sm text-muted-foreground">Admin Console</p>
|
| 38 |
+
</div>
|
| 39 |
+
</div>
|
| 40 |
+
|
| 41 |
+
<div class="sm:mx-auto sm:w-full sm:max-w-md">
|
| 42 |
+
<div class="bg-background py-8 px-4 sm:px-10 rounded-lg">
|
| 43 |
+
<form id="loginForm" class="space-y-6">
|
| 44 |
+
<div class="space-y-2">
|
| 45 |
+
<label for="username" class="text-sm font-medium">Username</label>
|
| 46 |
+
<input type="text" id="username" name="username" required
|
| 47 |
+
class="flex h-10 w-full rounded-md border border-input bg-background px-3 py-2 text-sm placeholder:text-muted-foreground focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-ring disabled:opacity-50"
|
| 48 |
+
placeholder="Enter username">
|
| 49 |
+
</div>
|
| 50 |
+
<div class="space-y-2">
|
| 51 |
+
<label for="password" class="text-sm font-medium">Password</label>
|
| 52 |
+
<input type="password" id="password" name="password" required
|
| 53 |
+
class="flex h-10 w-full rounded-md border border-input bg-background px-3 py-2 text-sm placeholder:text-muted-foreground focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-ring disabled:opacity-50"
|
| 54 |
+
placeholder="Enter password">
|
| 55 |
+
</div>
|
| 56 |
+
<button type="submit" id="loginButton"
|
| 57 |
+
class="inline-flex items-center justify-center rounded-md font-medium transition-colors bg-primary text-primary-foreground hover:bg-primary/90 h-10 w-full disabled:opacity-50">Sign In</button>
|
| 58 |
+
</form>
|
| 59 |
+
|
| 60 |
+
<div class="mt-6 text-center text-xs text-muted-foreground space-y-1">
|
| 61 |
+
<p>Created By Chenyme Β© 2025</p>
|
| 62 |
+
<p>Fork maintained by: @Tomiya233</p>
|
| 63 |
+
</div>
|
| 64 |
+
</div>
|
| 65 |
+
</div>
|
| 66 |
+
</div>
|
| 67 |
+
|
| 68 |
+
<script>
|
| 69 |
+
const form = document.getElementById('loginForm'), btn = document.getElementById('loginButton');
|
| 70 |
+
form.addEventListener('submit', async (e) => { e.preventDefault(); btn.disabled = true; btn.textContent = 'Signing in...'; try { const fd = new FormData(form), r = await fetch('/api/login', { method: 'POST', headers: { 'Content-Type': 'application/json' }, body: JSON.stringify({ username: fd.get('username'), password: fd.get('password') }) }); const d = await r.json(); d.success ? (localStorage.setItem('adminToken', d.token), location.href = '/manage') : showToast(d.message || 'Login failed', 'error') } catch (e) { showToast('Network error, please try again later', 'error') } finally { btn.disabled = false; btn.textContent = 'Sign In' } });
|
| 71 |
+
function showToast(m, t = 'error') { const d = document.createElement('div'), bc = { success: 'bg-green-600', error: 'bg-destructive', info: 'bg-primary' }; d.className = `fixed bottom-4 right-4 ${bc[t] || bc.error} text-white px-4 py-2.5 rounded-lg shadow-lg text-sm font-medium z-50 animate-slide-up`; d.textContent = m; document.body.appendChild(d); setTimeout(() => { d.style.opacity = '0'; d.style.transition = 'opacity .3s'; setTimeout(() => d.parentNode && document.body.removeChild(d), 300) }, 2000) }
|
| 72 |
+
window.addEventListener('DOMContentLoaded', () => { const t = localStorage.getItem('adminToken'); t && fetch('/api/stats', { headers: { Authorization: `Bearer ${t}` } }).then(r => { if (r.ok) location.href = '/manage' }) });
|
| 73 |
+
</script>
|
| 74 |
+
</body>
|
| 75 |
+
|
| 76 |
+
</html>
|
data/setting.toml
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[grok]
api_key = ""
# SECURITY: a live proxy credential (user:password@dc.decodo.com) and a
# cf_clearance session cookie were committed in this file. Values redacted
# here — the leaked credential and cookie must be rotated/invalidated.
proxy_url = ""
cache_proxy_url = ""
cf_clearance = ""
|
| 6 |
+
x_statsig_id = "ZTpUeXBlRXJyb3I6IENhbm5vdCByZWFkIHByb3BlcnRpZXMgb2YgdW5kZWZpbmVkIChyZWFkaW5nICdjaGlsZE5vZGVzJyk="
|
| 7 |
+
filtered_tags = "xaiartifact,xai:tool_usage_card,grok:render"
|
| 8 |
+
stream_chunk_timeout = 120
|
| 9 |
+
stream_total_timeout = 600
|
| 10 |
+
stream_first_response_timeout = 30
|
| 11 |
+
temporary = true
|
| 12 |
+
show_thinking = true
|
| 13 |
+
dynamic_statsig = true
|
| 14 |
+
proxy_pool_url = ""
|
| 15 |
+
proxy_pool_interval = 300
|
| 16 |
+
retry_status_codes = [ 401, 429,]
|
| 17 |
+
|
| 18 |
+
[global]
|
| 19 |
+
base_url = ""
|
| 20 |
+
log_level = "INFO"
|
| 21 |
+
image_mode = "url"
|
| 22 |
+
admin_password = "admin"
|
| 23 |
+
admin_username = "admin"
|
| 24 |
+
image_cache_max_size_mb = 512
|
| 25 |
+
video_cache_max_size_mb = 1024
|
data/temp/image.temp
ADDED
|
File without changes
|
data/token.json
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"ssoSuper": {},
|
| 3 |
+
"ssoNormal": {}
|
| 4 |
+
}
|
docker-compose.yml
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
services:
|
| 2 |
+
grok2api:
|
| 3 |
+
image: ghcr.io/chenyme/grok2api:latest
|
| 4 |
+
ports:
|
| 5 |
+
- "8000:8000"
|
| 6 |
+
volumes:
|
| 7 |
+
- grok_data:/app/data
|
| 8 |
+
- ./logs:/app/logs
|
| 9 |
+
environment:
|
| 10 |
+
# ===== Storage mode =====
|
| 11 |
+
# Supports file, mysql, or redis
|
| 12 |
+
- STORAGE_MODE=file
|
| 13 |
+
|
| 14 |
+
# ===== Database =====
|
| 15 |
+
# Only required when STORAGE_MODE=mysql or redis
|
| 16 |
+
# - DATABASE_URL=mysql://user:password@host:3306/grok2api
|
| 17 |
+
# MySQL format: mysql://user:password@host:port/database
|
| 18 |
+
# Redis format: redis://host:port/db or redis://user:password@host:port/db
|
| 19 |
+
|
| 20 |
+
# ===== Worker count =====
|
| 21 |
+
# Default 1. Recommendation: CPU cores * 2. Use MySQL/Redis in multi-process mode.
|
| 22 |
+
- WORKERS=1
|
| 23 |
+
|
| 24 |
+
volumes:
|
| 25 |
+
grok_data:
|
docker-entrypoint.sh
ADDED
|
@@ -0,0 +1,74 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/bin/sh
|
| 2 |
+
set -e
|
| 3 |
+
|
| 4 |
+
# Persistent data directory (Hugging Face mounts /data)
|
| 5 |
+
if [ -z "${DATA_DIR}" ]; then
|
| 6 |
+
if [ -d "/data" ]; then
|
| 7 |
+
DATA_DIR="/data"
|
| 8 |
+
else
|
| 9 |
+
DATA_DIR="/app/data"
|
| 10 |
+
fi
|
| 11 |
+
elif [ "${DATA_DIR}" = "/data/grok2api" ] && [ -d "/data" ]; then
|
| 12 |
+
# Normalize legacy default to the Space persistent volume root
|
| 13 |
+
DATA_DIR="/data"
|
| 14 |
+
fi
|
| 15 |
+
|
| 16 |
+
echo "[Grok2API] Using DATA_DIR=${DATA_DIR}"
|
| 17 |
+
|
| 18 |
+
# Ensure directories exist
|
| 19 |
+
mkdir -p \
|
| 20 |
+
"$DATA_DIR/temp/image" \
|
| 21 |
+
"$DATA_DIR/temp/video" \
|
| 22 |
+
/app/logs
|
| 23 |
+
|
| 24 |
+
echo "[Grok2API] Checking config files..."
|
| 25 |
+
|
| 26 |
+
# Initialize setting.toml only if missing
|
| 27 |
+
if [ ! -f "$DATA_DIR/setting.toml" ]; then
|
| 28 |
+
echo "[Grok2API] Initializing setting.toml..."
|
| 29 |
+
cat > "$DATA_DIR/setting.toml" << 'EOF'
|
| 30 |
+
[global]
|
| 31 |
+
base_url = "http://localhost:8000"
|
| 32 |
+
log_level = "INFO"
|
| 33 |
+
image_mode = "url"
|
| 34 |
+
admin_password = "admin"
|
| 35 |
+
admin_username = "admin"
|
| 36 |
+
image_cache_max_size_mb = 512
|
| 37 |
+
video_cache_max_size_mb = 1024
|
| 38 |
+
image_download_timeout = 30
|
| 39 |
+
image_download_max_size_mb = 20
|
| 40 |
+
max_upload_concurrency = 20
|
| 41 |
+
max_request_concurrency = 50
|
| 42 |
+
batch_save_interval = 1.0
|
| 43 |
+
batch_save_threshold = 10
|
| 44 |
+
|
| 45 |
+
[grok]
|
| 46 |
+
api_key = ""
|
| 47 |
+
proxy_url = ""
|
| 48 |
+
cache_proxy_url = ""
|
| 49 |
+
cf_clearance = ""
|
| 50 |
+
x_statsig_id = "ZTpUeXBlRXJyb3I6IENhbm5vdCByZWFkIHByb3BlcnRpZXMgb2YgdW5kZWZpbmVkIChyZWFkaW5nICdjaGlsZE5vZGVzJyk="
|
| 51 |
+
dynamic_statsig = true
|
| 52 |
+
filtered_tags = "xaiartifact,xai:tool_usage_card,grok:render"
|
| 53 |
+
stream_chunk_timeout = 120
|
| 54 |
+
stream_total_timeout = 600
|
| 55 |
+
stream_first_response_timeout = 30
|
| 56 |
+
temporary = true
|
| 57 |
+
show_thinking = true
|
| 58 |
+
proxy_pool_url = ""
|
| 59 |
+
proxy_pool_interval = 300
|
| 60 |
+
retry_status_codes = [401, 429]
|
| 61 |
+
EOF
|
| 62 |
+
fi
|
| 63 |
+
|
| 64 |
+
# Initialize token.json only if missing
|
| 65 |
+
if [ ! -f "$DATA_DIR/token.json" ]; then
|
| 66 |
+
echo "[Grok2API] Initializing token.json..."
|
| 67 |
+
echo '{"ssoNormal": {}, "ssoSuper": {}}' > "$DATA_DIR/token.json"
|
| 68 |
+
fi
|
| 69 |
+
|
| 70 |
+
echo "[Grok2API] Config file check completed"
|
| 71 |
+
echo "[Grok2API] Starting application..."
|
| 72 |
+
|
| 73 |
+
# Start the app
|
| 74 |
+
exec "$@"
|
main.py
ADDED
|
@@ -0,0 +1,196 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Grok2API"""
|
| 2 |
+
|
| 3 |
+
import os
|
| 4 |
+
import sys
|
| 5 |
+
from contextlib import asynccontextmanager
|
| 6 |
+
from pathlib import Path
|
| 7 |
+
|
| 8 |
+
from fastapi import FastAPI
|
| 9 |
+
from fastapi.middleware.cors import CORSMiddleware
|
| 10 |
+
from fastapi.staticfiles import StaticFiles
|
| 11 |
+
from app.core.logger import logger
|
| 12 |
+
from app.core.exception import register_exception_handlers
|
| 13 |
+
from app.core.storage import storage_manager
|
| 14 |
+
from app.core.config import setting
|
| 15 |
+
from app.services.grok.token import token_manager
|
| 16 |
+
from app.api.v1.chat import router as chat_router
|
| 17 |
+
from app.api.v1.models import router as models_router
|
| 18 |
+
from app.api.v1.images import router as images_router
|
| 19 |
+
from app.api.admin.manage import router as admin_router
|
| 20 |
+
from app.services.mcp import mcp
|
| 21 |
+
|
| 22 |
+
# 0. Compatibility check: prefer uvloop where available (no Windows support).
if sys.platform == 'win32':
    logger.info("[Grok2API] Windows system, using default asyncio event loop")
else:
    try:
        import uvloop
    except ImportError:
        logger.info("[Grok2API] uvloop not installed, using default asyncio event loop")
    else:
        uvloop.install()
        logger.info("[Grok2API] Enabled uvloop high-performance event loop")
|
| 32 |
+
|
| 33 |
+
# 1. Create MCP FastAPI app instance
|
| 34 |
+
mcp_app = mcp.http_app(stateless_http=True, transport="streamable-http")
|
| 35 |
+
|
| 36 |
+
# 2. Define app lifespan
@asynccontextmanager
async def lifespan(app: FastAPI):
    """
    Startup order:
    1. Initialize core services (storage, settings, token_manager)
    2. Load token data asynchronously
    3. Start batch save task
    4. Start MCP service lifespan

    Shutdown order (LIFO):
    1. Stop MCP service lifespan
    2. Stop batch save task and flush data
    3. Close core services

    Shutdown steps are chained with nested try/finally so that a failure in
    an earlier step can no longer skip the later cleanup steps (previously an
    exception from the MCP ``__aexit__`` would leave the token manager and
    storage backend open).
    """
    # --- Startup ---
    # 1. Initialize core services (storage must come first: everything below
    # reads or writes through it).
    await storage_manager.init()

    # Hand the shared storage backend to config and token manager.
    storage = storage_manager.get_storage()
    setting.set_storage(storage)
    token_manager.set_storage(storage)

    # 2. Reload config now that storage is available.
    await setting.reload()
    data_dir = getattr(storage, "data_dir", None)
    if data_dir:
        logger.info(f"[Storage] Data dir: {data_dir}")
    logger.info(f"[Config] Config path: {setting.config_path}")
    logger.info("[Grok2API] Core services initialized")

    # 2.5. Initialize proxy pool from the freshly reloaded grok config.
    # Imported here (not at module top) so proxy_pool is only constructed
    # once config is ready.
    from app.core.proxy_pool import proxy_pool
    proxy_url = setting.grok_config.get("proxy_url", "")
    proxy_pool_url = setting.grok_config.get("proxy_pool_url", "")
    proxy_pool_interval = setting.grok_config.get("proxy_pool_interval", 300)
    proxy_pool.configure(proxy_url, proxy_pool_url, proxy_pool_interval)

    # 3. Load token data asynchronously.
    # NOTE(review): calls a private method of token_manager; presumably
    # intentional since the manager cannot load before storage is attached.
    await token_manager._load_data()
    logger.info(f"[Token] Data path: {token_manager.token_file}")
    logger.info("[Grok2API] Token data loaded")

    # 3.5. Load API key data
    from app.services.api_keys import api_key_manager
    api_key_manager.set_storage(storage)
    await api_key_manager.init()
    logger.info("[Grok2API] API key data loaded")

    # 3.6. Load stats and log data
    from app.services.request_stats import request_stats
    from app.services.request_logger import request_logger
    await request_stats.init()
    await request_logger.init()
    logger.info("[Grok2API] Stats and log data loaded")

    # 4. Start batch save task
    await token_manager.start_batch_save()

    # 5. Manage MCP service lifespan manually: mcp_app is mounted as a
    # sub-application, so its lifespan is not driven by the outer app and
    # must be entered/exited by hand.
    mcp_lifespan_context = mcp_app.lifespan(app)
    await mcp_lifespan_context.__aenter__()
    logger.info("[MCP] MCP service initialized")

    logger.info("[Grok2API] App started successfully")

    try:
        yield
    finally:
        # --- Shutdown (LIFO) ---
        # Each step is wrapped so that an exception in one cannot prevent
        # the steps below it from running.
        try:
            # 1. Exit MCP service lifespan
            await mcp_lifespan_context.__aexit__(None, None, None)
            logger.info("[MCP] MCP service shut down")
        finally:
            try:
                # 2. Stop batch save task and flush pending token data
                await token_manager.shutdown()
                logger.info("[Token] Token manager shut down")
            finally:
                # 3. Close core services last — other components may still
                # flush through storage during their own shutdown.
                await storage_manager.close()
                logger.info("[Grok2API] App shut down successfully")
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
# Initialize logging (runs at import time, before uvicorn starts serving)
logger.info("[Grok2API] App is starting...")
logger.info("[Grok2API] Fork maintained by: @Tomiya233")

# Create FastAPI app; `lifespan` (defined above) drives startup/shutdown of
# storage, tokens, stats and the MCP sub-application.
app = FastAPI(
    title="Grok2API",
    description="Grok API conversion service",
    version="1.3.1",
    lifespan=lifespan
)

# Register global exception handlers so every route shares one error format.
register_exception_handlers(app)

# Register routes.
# chat/models live under the OpenAI-compatible /v1 prefix; the image and
# admin routers define their own absolute paths and mount at the root.
app.include_router(chat_router, prefix="/v1")
app.include_router(models_router, prefix="/v1")
app.include_router(images_router)
app.include_router(admin_router)

# Mount static files (admin HTML template assets) under /static.
app.mount("/static", StaticFiles(directory="app/template"), name="template")
|
| 143 |
+
|
| 144 |
+
@app.get("/")
async def root():
    """Root path — send visitors straight to the admin login page."""
    from fastapi.responses import RedirectResponse
    login_redirect = RedirectResponse(url="/login")
    return login_redirect
|
| 149 |
+
|
| 150 |
+
|
| 151 |
+
@app.get("/health")
async def health_check():
    """Health check endpoint.

    Returns a static liveness payload for container orchestrators and load
    balancers. The version is read from the FastAPI app instance so it can
    never drift from the version declared at app creation (the hard-coded
    "1.0.3" previously returned here disagreed with the app's "1.3.1").
    """
    return {
        "status": "healthy",
        "service": "Grok2API",
        "version": app.version
    }
|
| 159 |
+
|
| 160 |
+
# Mount MCP server at the root path. NOTE: an empty-path mount acts as a
# catch-all for any request no earlier route matched, so this MUST stay the
# last registration — routes added after it would be shadowed.
app.mount("", mcp_app)
|
| 162 |
+
|
| 163 |
+
|
| 164 |
+
if __name__ == "__main__":
    import uvicorn
    import os

    # Number of worker processes (env override, defaults to single process).
    workers = int(os.getenv("WORKERS", "1"))

    # Hugging Face Spaces (and similar platforms) provide PORT, default to 7860
    port = int(os.getenv("PORT", "7860"))

    # Warn about multi-worker mode with file storage: workers do not share
    # in-process state, so a shared backend (Redis/MySQL) is recommended.
    if workers > 1:
        logger.info(
            f"[Grok2API] Multi-process mode enabled (workers={workers}). "
            f"For best stability and performance, Redis or MySQL storage is recommended."
        )

    # Select event loop. uvloop is only requested for the single-worker,
    # non-Windows case; the import is purely an availability probe — uvicorn
    # performs the actual loop installation when loop="uvloop" is passed.
    loop_type = "auto"
    if workers == 1 and sys.platform != "win32":
        try:
            import uvloop
            loop_type = "uvloop"
        except ImportError:
            pass

    # App is passed as an import string ("main:app") so uvicorn can re-import
    # it in each worker process.
    uvicorn.run(
        "main:app",
        host="0.0.0.0",
        port=port,
        workers=workers,
        loop=loop_type,
    )
|
pyproject.toml
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[project]
|
| 2 |
+
name = "grok2api"
|
| 3 |
+
version = "1.4.3"
|
| 4 |
+
description = "Grok2API rebuilt on FastAPI, adapted to the latest web call format, supporting streaming chat, image generation, image editing, web search, video generation, deep reasoning, concurrent token pool usage, and automatic load balancing."
|
| 5 |
+
readme = "README.md"
|
| 6 |
+
requires-python = ">=3.13"
|
| 7 |
+
dependencies = [
|
| 8 |
+
"aiofiles==25.1.0",
|
| 9 |
+
"aiomysql==0.2.0",
|
| 10 |
+
"curl-cffi==0.13.0",
|
| 11 |
+
"fastapi==0.119.0",
|
| 12 |
+
"pydantic==2.12.2",
|
| 13 |
+
"python-dotenv==1.1.1",
|
| 14 |
+
"redis==6.4.0",
|
| 15 |
+
"requests==2.32.5",
|
| 16 |
+
"starlette==0.48.0",
|
| 17 |
+
"toml==0.10.2",
|
| 18 |
+
"uvloop==0.21.0 ; sys_platform != 'win32'",
|
| 19 |
+
"uvicorn==0.37.0",
|
| 20 |
+
"portalocker==3.0.0",
|
| 21 |
+
"fastmcp==2.12.4",
|
| 22 |
+
"cryptography==46.0.3",
|
| 23 |
+
"orjson==3.11.4",
|
| 24 |
+
"aiohttp==3.13.2",
|
| 25 |
+
"huggingface_hub==0.25.1",
|
| 26 |
+
]
|
readme.md
ADDED
|
@@ -0,0 +1,248 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Grok2API
|
| 2 |
+
|
| 3 |
+
Grok2API rebuilt on **FastAPI**, fully adapted to the latest web calling format. Supports streaming chat, image generation, image editing, web search, and deep reasoning, with token-pool concurrency and automatic load balancing.
|
| 4 |
+
|
| 5 |
+
## π Fork Enhancements
|
| 6 |
+
|
| 7 |
+
This fork adds:
|
| 8 |
+
|
| 9 |
+
- **Multi-key management and persistence**: Admins can batch create, label, and delete API keys with bulk operations. All keys are persisted across restarts.
|
| 10 |
+
- **Audit logging**: Real-time request logging with file persistence.
|
| 11 |
+
- **Concurrency performance optimization (Critical)**: Rebuilt Grok request/response handling with fully async streaming (`aiter_lines`), eliminating admin panel freezes or slow responses during generation.
|
| 12 |
+
- **Token smart cooldown**: Automatically cools down tokens after failures to avoid repeated use of failing tokens.
|
| 13 |
+
- Normal errors: cooldown for 5 requests
|
| 14 |
+
- 429 + quota: cooldown for 1 hour
|
| 15 |
+
- 429 + no quota: cooldown for 10 hours
|
| 16 |
+
- **One-click refresh for all tokens**: Batch refresh remaining counts with live progress.
|
| 17 |
+
- **Concurrency guard**: Reject duplicate refresh requests while a refresh is running.
|
| 18 |
+
- **Request stats with persistence**: Hourly/daily trends, success rate, model distribution; stored persistently.
|
| 19 |
+
- **Cache preview**: Admin panel preview for cached images/videos.
|
| 20 |
+
|
| 21 |
+
<br>
|
| 22 |
+
|
| 23 |
+
## Usage
|
| 24 |
+
|
| 25 |
+
### Call Limits and Quotas
|
| 26 |
+
|
| 27 |
+
- **Basic**: **80 calls / 20 hours**
|
| 28 |
+
- **Super**: quota TBD (not tested)
|
| 29 |
+
- System automatically load-balances across accounts. Use the **admin panel** to monitor usage and status.
|
| 30 |
+
|
| 31 |
+
### Image Generation
|
| 32 |
+
|
| 33 |
+
- In chat, prompt like βdraw a moonβ to trigger image generation.
|
| 34 |
+
- Returns **two images in Markdown** per request, consuming 4 calls.
|
| 35 |
+
- **Note**: Grok image direct links may return 403. The system caches images locally. You must set `Base Url` correctly so images display.
|
| 36 |
+
|
| 37 |
+
### Video Generation
|
| 38 |
+
|
| 39 |
+
- Use `grok-imagine-0.9` with an image + prompt (same format as OpenAI image analysis).
|
| 40 |
+
- Returns: `<video src="{full_video_url}" controls="controls"></video>`
|
| 41 |
+
- **Note**: Grok video direct links may return 403. The system caches videos locally. You must set `Base Url` correctly so videos display.
|
| 42 |
+
|
| 43 |
+
```
|
| 44 |
+
curl https://your-server/v1/chat/completions \
|
| 45 |
+
-H "Content-Type: application/json" \
|
| 46 |
+
-H "Authorization: Bearer $GROK2API_API_KEY" \
|
| 47 |
+
-d '{
|
| 48 |
+
"model": "grok-imagine-0.9",
|
| 49 |
+
"messages": [
|
| 50 |
+
{
|
| 51 |
+
"role": "user",
|
| 52 |
+
"content": [
|
| 53 |
+
{
|
| 54 |
+
"type": "text",
|
| 55 |
+
"text": "Make the sun rise"
|
| 56 |
+
},
|
| 57 |
+
{
|
| 58 |
+
"type": "image_url",
|
| 59 |
+
"image_url": {
|
| 60 |
+
"url": "https://your-image.jpg"
|
| 61 |
+
}
|
| 62 |
+
}
|
| 63 |
+
]
|
| 64 |
+
}
|
| 65 |
+
]
|
| 66 |
+
}'
|
| 67 |
+
```
|
| 68 |
+
|
| 69 |
+
### About `x_statsig_id`
|
| 70 |
+
|
| 71 |
+
- `x_statsig_id` is Grokβs anti-bot token.
|
| 72 |
+
- **New users should not change it**; keep the default.
|
| 73 |
+
- Attempts to obtain it via Camoufox were dropped because Grok now restricts non-logged-in `x_statsig_id`. A fixed value is used for compatibility.
|
| 74 |
+
|
| 75 |
+
<br>
|
| 76 |
+
|
| 77 |
+
## Deployment
|
| 78 |
+
|
| 79 |
+
### Option 1: Docker Compose (Recommended)
|
| 80 |
+
|
| 81 |
+
Because this fork includes changes, build locally:
|
| 82 |
+
|
| 83 |
+
1. Clone this repo
|
| 84 |
+
```bash
|
| 85 |
+
git clone https://github.com/Tomiya233/grok2api.git
|
| 86 |
+
cd grok2api
|
| 87 |
+
```
|
| 88 |
+
|
| 89 |
+
2. Start the service
|
| 90 |
+
```bash
|
| 91 |
+
docker-compose up -d --build
|
| 92 |
+
```
|
| 93 |
+
|
| 94 |
+
**docker-compose.yml example:**
|
| 95 |
+
```yaml
|
| 96 |
+
services:
|
| 97 |
+
grok2api:
|
| 98 |
+
build: .
|
| 99 |
+
image: grok2api:latest
|
| 100 |
+
container_name: grok2api
|
| 101 |
+
restart: always
|
| 102 |
+
ports:
|
| 103 |
+
- "8000:8000"
|
| 104 |
+
volumes:
|
| 105 |
+
- grok_data:/app/data
|
| 106 |
+
- ./logs:/app/logs
|
| 107 |
+
environment:
|
| 108 |
+
- LOG_LEVEL=INFO
|
| 109 |
+
logging:
|
| 110 |
+
driver: "json-file"
|
| 111 |
+
options:
|
| 112 |
+
max-size: "10m"
|
| 113 |
+
max-file: "3"
|
| 114 |
+
|
| 115 |
+
volumes:
|
| 116 |
+
grok_data:
|
| 117 |
+
```
|
| 118 |
+
|
| 119 |
+
### Option 2: Run with Python
|
| 120 |
+
|
| 121 |
+
**Requirements**: Python 3.13+ (recommend `uv`), matching `requires-python` in `pyproject.toml`.
|
| 122 |
+
|
| 123 |
+
1. Install uv
|
| 124 |
+
```bash
|
| 125 |
+
curl -LsSf https://astral.sh/uv/install.sh | sh
|
| 126 |
+
```
|
| 127 |
+
|
| 128 |
+
2. Run the service
|
| 129 |
+
```bash
|
| 130 |
+
# install deps and run
|
| 131 |
+
uv sync
|
| 132 |
+
uv run python main.py
|
| 133 |
+
```
|
| 134 |
+
|
| 135 |
+
The service runs at `http://127.0.0.1:8000` by default.
|
| 136 |
+
|
| 137 |
+
### Environment Variables
|
| 138 |
+
|
| 139 |
+
| Variable | Required | Description | Example |
|
| 140 |
+
|---------------|----------|-----------------------------------------------|---------|
|
| 141 |
+
| STORAGE_MODE | No | Storage mode: file/mysql/redis | file |
|
| 142 |
+
| DATABASE_URL | No | Database URL (required for mysql/redis) | mysql://user:pass@host:3306/db |
|
| 143 |
+
|
| 144 |
+
**Storage modes:**
|
| 145 |
+
- `file`: local file storage (default)
|
| 146 |
+
- `mysql`: MySQL storage, requires DATABASE_URL
|
| 147 |
+
- `redis`: Redis storage, requires DATABASE_URL
|
| 148 |
+
|
| 149 |
+
<br>
|
| 150 |
+
|
| 151 |
+
## API Overview
|
| 152 |
+
|
| 153 |
+
> Fully compatible with OpenAI API. Requests require **Authorization header**.
|
| 154 |
+
|
| 155 |
+
| Method | Endpoint | Description | Auth |
|
| 156 |
+
|--------|---------------------------|-----------------------------------------------|------|
|
| 157 |
+
| POST | `/v1/chat/completions` | Create chat completion (stream/non-stream) | β
|
|
| 158 |
+
| GET | `/v1/models` | List supported models | β
|
|
| 159 |
+
| GET | `/images/{img_path}` | Get generated image file | β |
|
| 160 |
+
|
| 161 |
+
<br>
|
| 162 |
+
|
| 163 |
+
<details>
|
| 164 |
+
<summary>Admin and stats endpoints (expand)</summary>
|
| 165 |
+
|
| 166 |
+
| Method | Endpoint | Description | Auth |
|
| 167 |
+
|--------|----------------------------------|------------------------------------|------|
|
| 168 |
+
| GET | /login | Admin login page | β |
|
| 169 |
+
| GET | /manage | Admin console page | β |
|
| 170 |
+
| POST | /api/login | Admin login | β |
|
| 171 |
+
| POST | /api/logout | Admin logout | β
|
|
| 172 |
+
| GET | /api/tokens | List tokens | β
|
|
| 173 |
+
| POST | /api/tokens/add | Batch add tokens | β
|
|
| 174 |
+
| POST | /api/tokens/delete | Batch delete tokens | β
|
|
| 175 |
+
| GET | /api/settings | Get settings | β
|
|
| 176 |
+
| POST | /api/settings | Update settings | β
|
|
| 177 |
+
| GET | /api/cache/size | Cache size | β
|
|
| 178 |
+
| POST | /api/cache/clear | Clear all cache | β
|
|
| 179 |
+
| POST | /api/cache/clear/images | Clear image cache | β
|
|
| 180 |
+
| POST | /api/cache/clear/videos | Clear video cache | β
|
|
| 181 |
+
| GET | /api/stats | Stats summary | β
|
|
| 182 |
+
| POST | /api/tokens/tags | Update token tags | β
|
|
| 183 |
+
| POST | /api/tokens/note | Update token note | β
|
|
| 184 |
+
| POST | /api/tokens/test | Test token availability | β
|
|
| 185 |
+
| GET | /api/tokens/tags/all | List all tags | β
|
|
| 186 |
+
| GET | /api/storage/mode | Storage mode | β
|
|
| 187 |
+
| POST | /api/tokens/refresh-all | Refresh all token limits | β
|
|
| 188 |
+
| GET | /api/tokens/refresh-progress | Refresh progress | β
|
|
| 189 |
+
| GET | /api/keys | List API keys | β
|
|
| 190 |
+
| POST | /api/keys/add | Create new API key | β
|
|
| 191 |
+
| POST | /api/keys/delete | Delete API key | β
|
|
| 192 |
+
| POST | /api/keys/status | Toggle API key status | β
|
|
| 193 |
+
| POST | /api/keys/name | Update API key name | β
|
|
| 194 |
+
| GET | /api/logs | Get logs (up to 1000) | β
|
|
| 195 |
+
| POST | /api/logs/clear | Clear audit logs | β
|
|
| 196 |
+
|
| 197 |
+
</details>
|
| 198 |
+
|
| 199 |
+
<br>
|
| 200 |
+
|
| 201 |
+
## Available Models
|
| 202 |
+
|
| 203 |
+
| Model | Cost | Account Type | Image Gen/Edit | Deep Thinking | Web Search | Video Gen |
|
| 204 |
+
|----------------------|------|---------------|----------------|---------------|------------|-----------|
|
| 205 |
+
| `grok-4.1` | 1 | Basic/Super | β
| β
| β
| β |
|
| 206 |
+
| `grok-4.1-thinking` | 1 | Basic/Super | β
| β
| β
| β |
|
| 207 |
+
| `grok-imagine-0.9` | - | Basic/Super | β
| β | β | β
|
|
| 208 |
+
| `grok-4-fast` | 1 | Basic/Super | β
| β
| β
| β |
|
| 209 |
+
| `grok-4-fast-expert` | 4 | Basic/Super | β
| β
| β
| β |
|
| 210 |
+
| `grok-4-expert` | 4 | Basic/Super | β
| β
| β
| β |
|
| 211 |
+
| `grok-4-heavy` | 1 | Super | β
| β
| β
| β |
|
| 212 |
+
| `grok-3-fast` | 1 | Basic/Super | β
| β | β
| β |
|
| 213 |
+
|
| 214 |
+
<br>
|
| 215 |
+
|
| 216 |
+
## Config Parameters
|
| 217 |
+
|
| 218 |
+
> After starting the service, log in at `/login` to configure settings.
|
| 219 |
+
|
| 220 |
+
| Parameter | Scope | Required | Description | Default |
|
| 221 |
+
|------------------------------|--------|----------|----------------------------------------------|---------|
|
| 222 |
+
| admin_username | global | No | Admin username | "admin" |
|
| 223 |
+
| admin_password | global | No | Admin password | "admin" |
|
| 224 |
+
| log_level | global | No | Log level: DEBUG/INFO/... | "INFO" |
|
| 225 |
+
| image_mode | global | No | Image return mode: url/base64 | "url" |
|
| 226 |
+
| image_cache_max_size_mb | global | No | Image cache max size (MB) | 512 |
|
| 227 |
+
| video_cache_max_size_mb | global | No | Video cache max size (MB) | 1024 |
|
| 228 |
+
| base_url | global | No | Base URL for service and image links | "" |
|
| 229 |
+
| api_key | grok | No | API key (optional, for extra security) | "" |
|
| 230 |
+
| proxy_url | grok | No | HTTP proxy URL | "" |
|
| 231 |
+
| stream_chunk_timeout | grok | No | Stream chunk timeout (seconds) | 120 |
|
| 232 |
+
| stream_first_response_timeout| grok | No | First response timeout (seconds) | 30 |
|
| 233 |
+
| stream_total_timeout | grok | No | Total stream timeout (seconds) | 600 |
|
| 234 |
+
| cf_clearance | grok | No | Cloudflare clearance token | "" |
|
| 235 |
+
| x_statsig_id | grok | Yes | Anti-bot identifier | "ZTpUeXBlRXJyb3I6IENhbm5vdCByZWFkIHByb3BlcnRpZXMgb2YgdW5kZWZpbmVkIChyZWFkaW5nICdjaGlsZE5vZGVzJyk=" |
|
| 236 |
+
| filtered_tags | grok | No | Filtered tags (comma-separated) | "xaiartifact,xai:tool_usage_card,grok:render" |
|
| 237 |
+
| show_thinking | grok | No | Show thinking: true/false | true |
|
| 238 |
+
| temporary | grok | No | Session mode: true/false | true |
|
| 239 |
+
|
| 240 |
+
<br>
|
| 241 |
+
|
| 242 |
+
## β οΈ Notes
|
| 243 |
+
|
| 244 |
+
This project is for learning and research only. Please comply with applicable terms.
|
| 245 |
+
|
| 246 |
+
<br>
|
| 247 |
+
|
| 248 |
+
> Rebuilt with guidance from: [LINUX DO](https://linux.do), [VeroFess/grok2api](https://github.com/VeroFess/grok2api), [xLmiler/grok2api_python](https://github.com/xLmiler/grok2api_python)
|
requirements.txt
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
toml==0.10.2
|
| 2 |
+
fastapi==0.119.0
|
| 3 |
+
uvicorn==0.37.0
|
| 4 |
+
uvloop==0.21.0; sys_platform != 'win32'
|
| 5 |
+
python-dotenv==1.1.1
|
| 6 |
+
curl_cffi==0.13.0
|
| 7 |
+
requests==2.32.5
|
| 8 |
+
starlette==0.48.0
|
| 9 |
+
pydantic==2.12.2
|
| 10 |
+
aiofiles==25.1.0
|
| 11 |
+
portalocker==3.0.0
|
| 12 |
+
aiomysql==0.2.0
|
| 13 |
+
redis==6.4.0
|
| 14 |
+
fastmcp==2.12.4
|
| 15 |
+
cryptography==46.0.3
|
| 16 |
+
orjson==3.11.4
|
| 17 |
+
aiohttp==3.13.2
|
| 18 |
+
huggingface_hub==0.25.1
|
test/test_concurrency.py
ADDED
|
@@ -0,0 +1,276 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
"""
|
| 3 |
+
Grok2API concurrency performance test script
|
| 4 |
+
|
| 5 |
+
Test API performance under different concurrency levels
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
import asyncio
|
| 9 |
+
import aiohttp
|
| 10 |
+
import time
|
| 11 |
+
import statistics
|
| 12 |
+
import argparse
|
| 13 |
+
from datetime import datetime
|
| 14 |
+
from typing import List, Dict, Any
|
| 15 |
+
import json
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
class ConcurrencyTester:
    """Concurrency tester.

    Fires batches of chat-completion requests at a Grok2API instance and
    reports latency/throughput statistics. Results accumulate in
    ``self.results`` across calls unless the caller clears the list.
    """

    def __init__(self, base_url: str, api_key: str | None = None):
        # Trailing slash is stripped so URL joining below stays predictable.
        self.base_url = base_url.rstrip('/')
        self.api_key = api_key
        # One result dict per completed request; see test_request for shape.
        self.results: List[Dict[str, Any]] = []

    async def test_request(self, session: aiohttp.ClientSession, request_id: int) -> Dict[str, Any]:
        """Send a single test request.

        Returns a result dict with ``status`` of "success", "error",
        "timeout" or "exception", plus the elapsed wall-clock time. Never
        raises — all failures are folded into the returned dict.
        """
        url = f"{self.base_url}/v1/chat/completions"

        headers = {
            "Content-Type": "application/json"
        }
        if self.api_key:
            headers["Authorization"] = f"Bearer {self.api_key}"

        # Minimal non-streaming payload; max_tokens kept tiny so the server
        # cost per probe stays low.
        payload = {
            "model": "grok-3-fast",
            "messages": [
                {"role": "user", "content": f"Test request #{request_id}, reply OK briefly"}
            ],
            "stream": False,
            "max_tokens": 10
        }

        start_time = time.time()

        try:
            # Per-request timeout of 30s overrides the session-level default.
            async with session.post(url, json=payload, headers=headers, timeout=30) as response:
                status = response.status

                if status == 200:
                    data = await response.json()
                    elapsed = time.time() - start_time

                    return {
                        "id": request_id,
                        "status": "success",
                        "http_status": status,
                        "elapsed": elapsed,
                        "response_length": len(json.dumps(data))
                    }
                else:
                    elapsed = time.time() - start_time
                    # Body truncated to 200 chars to keep result dicts small.
                    error_text = await response.text()

                    return {
                        "id": request_id,
                        "status": "error",
                        "http_status": status,
                        "elapsed": elapsed,
                        "error": error_text[:200]
                    }

        except asyncio.TimeoutError:
            elapsed = time.time() - start_time
            return {
                "id": request_id,
                "status": "timeout",
                "elapsed": elapsed,
                "error": "Request timeout"
            }

        except Exception as e:
            # Catch-all is deliberate here: a load-test probe must never
            # abort the whole run because of one connection error.
            elapsed = time.time() - start_time
            return {
                "id": request_id,
                "status": "exception",
                "elapsed": elapsed,
                "error": str(e)
            }

    async def run_concurrent_test(self, concurrency: int, total_requests: int) -> None:
        """Run concurrency test.

        NOTE(review): requests are issued in *batches* of ``concurrency``
        (create N tasks, gather them all, then start the next batch), not as
        a sliding window — effective concurrency dips to zero between
        batches. A Semaphore-based design would keep the pipe full; kept
        as-is to preserve the published numbers' semantics.
        """
        print(f"\n{'='*60}")
        print(f"π Test config: concurrency {concurrency}, total requests {total_requests}")
        print(f"{'='*60}")

        # Cap connection-pool size at the requested concurrency.
        connector = aiohttp.TCPConnector(limit=concurrency, limit_per_host=concurrency)
        timeout = aiohttp.ClientTimeout(total=60)

        async with aiohttp.ClientSession(connector=connector, timeout=timeout) as session:
            # Warm-up request (id 0) — its result is intentionally discarded
            # and not counted in self.results.
            print("π₯ Warming up...")
            await self.test_request(session, 0)

            # Start test
            print("π Starting concurrency test...")
            start_time = time.time()

            # Create tasks
            tasks = []
            for i in range(1, total_requests + 1):
                task = asyncio.create_task(self.test_request(session, i))
                tasks.append(task)

                # Control concurrency: flush the batch once it is full.
                if len(tasks) >= concurrency:
                    results = await asyncio.gather(*tasks)
                    self.results.extend(results)
                    tasks = []

                # Show progress (carriage return keeps it on one line)
                print(f" Progress: {i}/{total_requests} ({i/total_requests*100:.1f}%)", end='\r')

            # Handle remaining tasks (final partial batch)
            if tasks:
                results = await asyncio.gather(*tasks)
                self.results.extend(results)

        total_time = time.time() - start_time

        # Stats and output
        self.print_statistics(concurrency, total_requests, total_time)

    def print_statistics(self, concurrency: int, total_requests: int, total_time: float) -> None:
        """Print statistics for everything currently in ``self.results``.

        NOTE(review): reads the accumulated results list, so callers running
        multiple rounds must reset ``self.results`` between rounds (as
        ``main`` does) or the percentages will mix rounds.
        """
        success_results = [r for r in self.results if r["status"] == "success"]
        error_results = [r for r in self.results if r["status"] != "success"]

        success_count = len(success_results)
        error_count = len(error_results)

        if success_results:
            latencies = [r["elapsed"] for r in success_results]
            avg_latency = statistics.mean(latencies)
            min_latency = min(latencies)
            max_latency = max(latencies)
            p50_latency = statistics.median(latencies)
            # Nearest-rank percentiles; int(len*0.95) < len for len >= 1, so
            # the index is always in range.
            p95_latency = sorted(latencies)[int(len(latencies) * 0.95)] if len(latencies) > 1 else latencies[0]
            p99_latency = sorted(latencies)[int(len(latencies) * 0.99)] if len(latencies) > 1 else latencies[0]
        else:
            avg_latency = min_latency = max_latency = p50_latency = p95_latency = p99_latency = 0

        throughput = total_requests / total_time if total_time > 0 else 0

        print(f"\n\n{'='*60}")
        print("π Test result statistics")
        print(f"{'='*60}")
        print(f"  Test time: {total_time:.2f}s")
        print(f"  Total requests: {total_requests}")
        print(f"  Concurrency: {concurrency}")
        print(f"")
        print(f"  Successful requests: {success_count} ({success_count/total_requests*100:.1f}%)")
        print(f"  Failed requests: {error_count} ({error_count/total_requests*100:.1f}%)")
        print(f"")
        print(f"  Throughput: {throughput:.2f} req/s")
        print(f"")
        print("  Latency stats:")
        print(f"    Min: {min_latency*1000:.0f}ms")
        print(f"    Avg: {avg_latency*1000:.0f}ms")
        print(f"    Max: {max_latency*1000:.0f}ms")
        print(f"    P50: {p50_latency*1000:.0f}ms")
        print(f"    P95: {p95_latency*1000:.0f}ms")
        print(f"    P99: {p99_latency*1000:.0f}ms")

        # Error details, grouped by failure kind
        if error_results:
            print("\n  β οΈ Error details:")
            error_types = {}
            for r in error_results:
                error_type = r.get("status", "unknown")
                error_types[error_type] = error_types.get(error_type, 0) + 1

            for error_type, count in error_types.items():
                print(f"    {error_type}: {count}")

        print(f"{'='*60}\n")

        # Performance rating
        self.print_performance_rating(throughput, avg_latency)

    def print_performance_rating(self, throughput: float, avg_latency: float) -> None:
        """Print performance rating (coarse star buckets, cosmetic only)."""
        print("π― Performance rating:")

        # Throughput rating
        if throughput >= 100:
            rating = "βββββ Excellent"
        elif throughput >= 60:
            rating = "ββββ Good"
        elif throughput >= 30:
            rating = "βββ Medium"
        elif throughput >= 10:
            rating = "ββ Low"
        else:
            rating = "β Needs improvement"

        print(f"  Throughput ({throughput:.1f} req/s): {rating}")

        # Latency rating (thresholds in seconds)
        if avg_latency < 0.5:
            rating = "βββββ Excellent"
        elif avg_latency < 1.0:
            rating = "ββββ Good"
        elif avg_latency < 2.0:
            rating = "βββ Medium"
        elif avg_latency < 5.0:
            rating = "ββ High"
        else:
            rating = "β Needs improvement"

        print(f"  Avg latency ({avg_latency*1000:.0f}ms): {rating}")
        print()
|
| 224 |
+
|
| 225 |
+
|
| 226 |
+
async def main():
    """Main function: parse CLI arguments and drive one or more test rounds."""
    parser = argparse.ArgumentParser(description='Grok2API concurrency performance test')
    parser.add_argument('--url', default='http://localhost:8000', help='API base URL')
    parser.add_argument('--key', default='', help='API key (optional)')
    parser.add_argument('-c', '--concurrency', type=int, default=10, help='Concurrency')
    parser.add_argument('-n', '--requests', type=int, default=50, help='Total requests')
    parser.add_argument('--multi-test', action='store_true', help='Run multi-level concurrency tests')

    args = parser.parse_args()

    print(f"""
ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
β              Grok2API Concurrency Test Tool              β
ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ

π Target: {args.url}
π API Key: {'Set' if args.key else 'Not set'}
β° Start time: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}
""")

    tester = ConcurrencyTester(args.url, args.key)

    if args.multi_test:
        # Multi-level concurrency tests: escalating (concurrency, requests)
        # ladder to find where throughput stops scaling.
        test_configs = [
            (5, 20),    # 5 concurrency, 20 requests
            (10, 50),   # 10 concurrency, 50 requests
            (20, 100),  # 20 concurrency, 100 requests
            (50, 200),  # 50 concurrency, 200 requests
        ]

        for concurrency, requests in test_configs:
            # Clear accumulated results so each round's stats stand alone.
            tester.results = []
            await tester.run_concurrent_test(concurrency, requests)
            await asyncio.sleep(2)  # 2-second cool-down between rounds
    else:
        # Single test
        await tester.run_concurrent_test(args.concurrency, args.requests)

    print("\nβ Test completed!")
    print(f"β° End time: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n")
|
| 268 |
+
|
| 269 |
+
|
| 270 |
+
if __name__ == "__main__":
    try:
        asyncio.run(main())
    except KeyboardInterrupt:
        # Ctrl-C during a run: exit quietly without a traceback.
        print("\n\nβ οΈ Test interrupted by user")
    except Exception as e:
        # Broad catch is acceptable at the script's top-level boundary —
        # any failure is reported as a single line instead of a traceback.
        print(f"\n\nβ Test failed: {e}")
|
test/test_concurrency.sh
ADDED
|
@@ -0,0 +1,177 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/bin/bash

# Grok2API concurrency test script (Shell version)
# Uses curl (and GNU parallel when available) for concurrent load testing.

set -e

# Configuration — every knob is overridable via environment variables.
BASE_URL="${BASE_URL:-http://localhost:8000}"
API_KEY="${API_KEY:-}"
CONCURRENCY="${CONCURRENCY:-10}"
TOTAL_REQUESTS="${TOTAL_REQUESTS:-50}"

# ANSI colors for terminal output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

echo -e "${BLUE}╔══════════════════════════════════════════════════════════╗${NC}"
echo -e "${BLUE}║        Grok2API Concurrency Test Tool (Shell)            ║${NC}"
echo -e "${BLUE}╚══════════════════════════════════════════════════════════╝${NC}"
echo ""
echo -e "${GREEN}🌐 Target:${NC} $BASE_URL"
echo -e "${GREEN}🔑 API Key:${NC} ${API_KEY:-(Not set)}"
echo -e "${GREEN}🔄 Concurrency:${NC} $CONCURRENCY"
echo -e "${GREEN}📊 Total requests:${NC} $TOTAL_REQUESTS"
echo ""

# Check dependencies: curl performs the requests, bc does all the float math
# in the summary section below.
if ! command -v curl &> /dev/null; then
    echo -e "${RED}❌ Error: curl is required${NC}"
    exit 1
fi
if ! command -v bc &> /dev/null; then
    echo -e "${RED}❌ Error: bc is required${NC}"
    exit 1
fi

# Create temp directory; single-quoted trap so TMP_DIR is expanded (quoted)
# at exit time, not at trap-definition time.
TMP_DIR=$(mktemp -d)
trap 'rm -rf "$TMP_DIR"' EXIT

# Single request function. Runs inside parallel / background jobs, so it may
# only rely on exported variables (see the export lines below).
test_request() {
    local request_id=$1

    # curl -w appends the HTTP status code and total time as the final
    # two lines of the captured output.
    local response=$(curl -s -w "\n%{http_code}\n%{time_total}" \
        -X POST "${BASE_URL}/v1/chat/completions" \
        -H "Content-Type: application/json" \
        ${API_KEY:+-H "Authorization: Bearer $API_KEY"} \
        -d "{
            \"model\": \"grok-3-fast\",
            \"messages\": [{\"role\": \"user\", \"content\": \"Test request #${request_id}, reply OK briefly\"}],
            \"stream\": false,
            \"max_tokens\": 10
        }" 2>&1)

    local http_code=$(echo "$response" | tail -n 2 | head -n 1)
    local time_total=$(echo "$response" | tail -n 1)

    # Record result (short single-line appends; interleaving risk is minimal)
    echo "${request_id},${http_code},${time_total}" >> "$TMP_DIR/results.csv"

    # Show progress
    echo -ne "\r  Progress: ${request_id}/${TOTAL_REQUESTS}"
}

# Export function and variables for parallel / subshells.
# TOTAL_REQUESTS must be exported too, otherwise the progress display
# inside test_request is blank when run under GNU parallel.
export -f test_request
export BASE_URL API_KEY TMP_DIR TOTAL_REQUESTS

# Clear results file (header row)
echo "id,status,time" > "$TMP_DIR/results.csv"

echo -e "${YELLOW}🚀 Starting concurrency test...${NC}"
START_TIME=$(date +%s.%N)

# Use GNU parallel if available, otherwise a simple loop
if command -v parallel &> /dev/null; then
    seq 1 $TOTAL_REQUESTS | parallel -j $CONCURRENCY test_request {}
else
    # Simple background task concurrency
    for i in $(seq 1 $TOTAL_REQUESTS); do
        test_request $i &

        # Control concurrency: wait for each batch to drain
        if (( i % CONCURRENCY == 0 )); then
            wait
        fi
    done
    wait
fi

END_TIME=$(date +%s.%N)
TOTAL_TIME=$(echo "$END_TIME - $START_TIME" | bc)

echo -e "\n"

# Results summary
echo -e "${BLUE}═══════════════════════════════════════════════════════════${NC}"
echo -e "${BLUE}📊 Test result summary${NC}"
echo -e "${BLUE}═══════════════════════════════════════════════════════════${NC}"

# Count success/failure (the CSV header row never matches status 200)
SUCCESS_COUNT=$(awk -F',' '$2 == 200 {count++} END {print count+0}' "$TMP_DIR/results.csv")
ERROR_COUNT=$((TOTAL_REQUESTS - SUCCESS_COUNT))

echo -e "  Test time: ${TOTAL_TIME}s"
echo -e "  Total requests: ${TOTAL_REQUESTS}"
echo -e "  Concurrency: ${CONCURRENCY}"
echo ""
echo -e "  Successful requests: ${GREEN}${SUCCESS_COUNT}${NC} ($(echo "scale=1; $SUCCESS_COUNT * 100 / $TOTAL_REQUESTS" | bc)%)"
echo -e "  Failed requests: ${RED}${ERROR_COUNT}${NC} ($(echo "scale=1; $ERROR_COUNT * 100 / $TOTAL_REQUESTS" | bc)%)"
echo ""

# Calculate throughput
THROUGHPUT=$(echo "scale=2; $TOTAL_REQUESTS / $TOTAL_TIME" | bc)
echo -e "  Throughput: ${GREEN}${THROUGHPUT}${NC} req/s"
echo ""

# Latency stats (successful requests only)
if [ $SUCCESS_COUNT -gt 0 ]; then
    echo -e "  Latency stats:"

    # Extract latencies for successful requests, sorted ascending
    awk -F',' '$2 == 200 {print $3}' "$TMP_DIR/results.csv" | sort -n > "$TMP_DIR/latencies.txt"

    MIN=$(head -n 1 "$TMP_DIR/latencies.txt" | awk '{printf "%.0f", $1*1000}')
    MAX=$(tail -n 1 "$TMP_DIR/latencies.txt" | awk '{printf "%.0f", $1*1000}')
    AVG=$(awk '{sum+=$1; count++} END {printf "%.0f", sum/count*1000}' "$TMP_DIR/latencies.txt")

    # Percentiles: clamp the 1-based line index to >= 1 so small sample
    # counts (e.g. SUCCESS_COUNT=1) don't produce an invalid "sed -n 0p".
    P50_LINE=$((SUCCESS_COUNT / 2))
    [ "$P50_LINE" -lt 1 ] && P50_LINE=1
    P50=$(sed -n "${P50_LINE}p" "$TMP_DIR/latencies.txt" | awk '{printf "%.0f", $1*1000}')

    P95_LINE=$(echo "scale=0; $SUCCESS_COUNT * 0.95 / 1" | bc)
    [ "$P95_LINE" -lt 1 ] && P95_LINE=1
    P95=$(sed -n "${P95_LINE}p" "$TMP_DIR/latencies.txt" | awk '{printf "%.0f", $1*1000}')

    P99_LINE=$(echo "scale=0; $SUCCESS_COUNT * 0.99 / 1" | bc)
    [ "$P99_LINE" -lt 1 ] && P99_LINE=1
    P99=$(sed -n "${P99_LINE}p" "$TMP_DIR/latencies.txt" | awk '{printf "%.0f", $1*1000}')

    echo -e "    Min: ${MIN}ms"
    echo -e "    Avg: ${AVG}ms"
    echo -e "    Max: ${MAX}ms"
    echo -e "    P50: ${P50}ms"
    echo -e "    P95: ${P95}ms"
    echo -e "    P99: ${P99}ms"
fi

echo -e "${BLUE}═══════════════════════════════════════════════════════════${NC}"

# Performance rating based on throughput thresholds
echo -e "${YELLOW}🎯 Performance rating:${NC}"

if (( $(echo "$THROUGHPUT >= 100" | bc -l) )); then
    RATING="⭐⭐⭐⭐⭐ Excellent"
elif (( $(echo "$THROUGHPUT >= 60" | bc -l) )); then
    RATING="⭐⭐⭐⭐ Good"
elif (( $(echo "$THROUGHPUT >= 30" | bc -l) )); then
    RATING="⭐⭐⭐ Medium"
elif (( $(echo "$THROUGHPUT >= 10" | bc -l) )); then
    RATING="⭐⭐ Low"
else
    RATING="⭐ Needs improvement"
fi

echo -e "  Throughput (${THROUGHPUT} req/s): ${RATING}"

echo ""
echo -e "${GREEN}✅ Test completed!${NC}"
|
test_key.py
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import requests
import json
import uuid
# NOTE(review): json and uuid appear unused in the visible code — confirm before removing.

# ================= Configuration =================
# 1. Create a new key in the admin UI under [Key Management]
# 2. Paste the new sk-... below
API_KEY = "YOUR_NEW_API_KEY"  # placeholder; the __main__ guard refuses to run until replaced
BASE_URL = "http://127.0.0.1:8000"  # base URL of the locally running API server
# =============================================
|
| 11 |
+
|
| 12 |
+
def test_chat_completion():
    """Send one non-streaming chat completion through the API and print the result.

    Uses the module-level API_KEY / BASE_URL configuration.

    Returns:
        bool: True when the request returned HTTP 200 with a parseable
        choice, False on any transport error, non-200 status, or an
        unexpected response body.
    """
    print(f"Starting key test: {API_KEY[:10]}...")

    url = f"{BASE_URL}/v1/chat/completions"
    headers = {
        "Authorization": f"Bearer {API_KEY}",
        "Content-Type": "application/json"
    }

    payload = {
        "model": "grok-4-fast",
        "messages": [
            {"role": "user", "content": "Hello, who are you? Tell me a joke."}
        ],
        "stream": False
    }

    try:
        # Only the network call sits in this try: connection refused,
        # DNS failure, timeout, etc. are transport-level errors.
        response = requests.post(url, headers=headers, json=payload, timeout=30)
    except requests.exceptions.RequestException as e:
        print(f"Error occurred: {e}")
        return False

    print(f"Status code: {response.status_code}")

    if response.status_code != 200:
        print(f"Request failed: {response.text}")
        return False

    try:
        # 200 but an unexpected body shape should be reported, not crash.
        result = response.json()
        content = result['choices'][0]['message']['content']
    except (ValueError, KeyError, IndexError) as e:
        print(f"Error occurred: {e}")
        return False

    print("--- Response OK ---")
    print(content)
    print("---------------")
    print("Test passed! Check the admin UI [Audit Logs] to confirm the request was recorded.")
    return True
|
| 45 |
+
|
| 46 |
+
if __name__ == "__main__":
    # Refuse to run against the unmodified placeholder key.
    placeholder = "YOUR_NEW_API_KEY"
    if API_KEY != placeholder:
        test_chat_completion()
    else:
        print("Please replace API_KEY with the key you just created.")
|
uv.lock
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|