CSDDSFSFSAFSAF committed on
Commit 6f44b37 · verified · 1 parent: 59fdf06

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full change set.
Files changed (50)
  1. .gitattributes +35 -0
  2. assets/grpo_csv.png +3 -0
  3. assets/teaser.png +3 -0
  4. clip_as_service/Dockerfiles/base.Dockerfile +35 -0
  5. clip_as_service/Dockerfiles/cuda.Dockerfile +45 -0
  6. clip_as_service/Dockerfiles/server.Dockerfile +50 -0
  7. clip_as_service/Dockerfiles/tensorrt.Dockerfile +38 -0
  8. clip_as_service/client/clip_client/__init__.py +10 -0
  9. clip_as_service/client/clip_client/client.py +890 -0
  10. clip_as_service/client/clip_client/helper.py +54 -0
  11. clip_as_service/client/setup.py +93 -0
  12. clip_as_service/docs/Makefile +19 -0
  13. clip_as_service/docs/_static/JCloud-dark.svg +13 -0
  14. clip_as_service/docs/_static/JCloud-light.svg +13 -0
  15. clip_as_service/docs/_static/banner.png +3 -0
  16. clip_as_service/docs/_static/cas-dark.svg +13 -0
  17. clip_as_service/docs/_static/cas-grafana.json +900 -0
  18. clip_as_service/docs/_static/cas-light.svg +13 -0
  19. clip_as_service/docs/_static/colab-banner.png +3 -0
  20. clip_as_service/docs/_static/demo-embed.html +213 -0
  21. clip_as_service/docs/_static/demo-text-rank.html +245 -0
  22. clip_as_service/docs/_static/docarray-dark.svg +11 -0
  23. clip_as_service/docs/_static/docarray-light.svg +11 -0
  24. clip_as_service/docs/_static/favicon.png +3 -0
  25. clip_as_service/docs/_static/finetuner-dark.svg +3 -0
  26. clip_as_service/docs/_static/finetuner-light.svg +3 -0
  27. clip_as_service/docs/_static/hub-dark.svg +6 -0
  28. clip_as_service/docs/_static/hub-light.svg +6 -0
  29. clip_as_service/docs/_static/logo-dark.svg +20 -0
  30. clip_as_service/docs/_static/logo-light.svg +43 -0
  31. clip_as_service/docs/_static/main.css +175 -0
  32. clip_as_service/docs/_static/now-dark.svg +13 -0
  33. clip_as_service/docs/_static/now-light.svg +13 -0
  34. clip_as_service/docs/_static/search-dark.svg +7 -0
  35. clip_as_service/docs/_static/search-light.svg +7 -0
  36. clip_as_service/docs/_templates/page.html +230 -0
  37. clip_as_service/docs/_templates/sidebar/brand.html +41 -0
  38. clip_as_service/docs/_templates/sidebar/navigation.html +35 -0
  39. clip_as_service/docs/changelog/index.md +22 -0
  40. clip_as_service/docs/conf.py +220 -0
  41. clip_as_service/docs/hosting/by-jina.md +70 -0
  42. clip_as_service/docs/hosting/cas-on-colab.svg +1 -0
  43. clip_as_service/docs/hosting/colab-banner.png +3 -0
  44. clip_as_service/docs/hosting/colab.md +37 -0
  45. clip_as_service/docs/hosting/jc-deploy.png +3 -0
  46. clip_as_service/docs/hosting/on-jcloud.md +78 -0
  47. clip_as_service/docs/html_extra/robots.txt +2 -0
  48. clip_as_service/docs/index.md +176 -0
  49. clip_as_service/docs/makedoc.sh +7 -0
  50. clip_as_service/docs/playground/embedding.md +16 -0
.gitattributes CHANGED
@@ -27,3 +27,38 @@

 # Jupyter notebook
 *.ipynb text eol=lf
+clip_as_service/docs/hosting/jc-deploy.png filter=lfs diff=lfs merge=lfs -text
+clip_as_service/docs/hosting/colab-banner.png filter=lfs diff=lfs merge=lfs -text
+clip_as_service/docs/_static/banner.png filter=lfs diff=lfs merge=lfs -text
+clip_as_service/docs/_static/colab-banner.png filter=lfs diff=lfs merge=lfs -text
+clip_as_service/docs/_static/favicon.png filter=lfs diff=lfs merge=lfs -text
+clip_as_service/docs/user-guides/images/retreival.png filter=lfs diff=lfs merge=lfs -text
+clip_as_service/docs/user-guides/images/server-start-monitoring.gif filter=lfs diff=lfs merge=lfs -text
+clip_as_service/docs/user-guides/images/memory_usage_dim_512.png filter=lfs diff=lfs merge=lfs -text
+clip_as_service/docs/user-guides/images/client-pgbar.gif filter=lfs diff=lfs merge=lfs -text
+clip_as_service/docs/user-guides/images/server-start.gif filter=lfs diff=lfs merge=lfs -text
+time_r1/trainer/__pycache__/grpo_trainer_env_origin.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
+time_r1/trainer/__pycache__/grpo_trainer_env_no_vllm_latest.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
+time_r1/trainer/__pycache__/grpo_trainer_env.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
+time_r1/trainer/__pycache__/grpo_trainer_env_no_vllm.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
+time_r1/trainer/__pycache__/grpo_trainer_env_stage_1.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
+time_r1/trainer/__pycache__/grpo_trainer_env_no_vllm_latest_data.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
+clip_as_service/docs/user-guides/images/memory_usage_dim_128.png filter=lfs diff=lfs merge=lfs -text
+clip_as_service/docs/user-guides/images/grafana-dashboard.png filter=lfs diff=lfs merge=lfs -text
+clip_as_service/docs/user-guides/images/polling_stratey.png filter=lfs diff=lfs merge=lfs -text
+clip_as_service/tests/img/00001.jpg filter=lfs diff=lfs merge=lfs -text
+clip_as_service/tests/img/00004.jpg filter=lfs diff=lfs merge=lfs -text
+clip_as_service/tests/img/00003.jpg filter=lfs diff=lfs merge=lfs -text
+clip_as_service/tests/img/00002.jpg filter=lfs diff=lfs merge=lfs -text
+clip_as_service/docs/user-guides/images/server-log.gif filter=lfs diff=lfs merge=lfs -text
+clip_as_service/tests/img/00000.jpg filter=lfs diff=lfs merge=lfs -text
+clip_as_service/server/build/lib/clip_server/resources/bpe_simple_vocab_16e6.txt.gz filter=lfs diff=lfs merge=lfs -text
+clip_as_service/server/clip_server/resources/bpe_simple_vocab_16e6.txt.gz filter=lfs diff=lfs merge=lfs -text
+data_prepare/node-v24.12.0-linux-x64.tar.xz filter=lfs diff=lfs merge=lfs -text
+tool_output_reflect.jsonl filter=lfs diff=lfs merge=lfs -text
+assets/grpo_csv.png filter=lfs diff=lfs merge=lfs -text
+siglip-so400m-patch14-384/spiece.model filter=lfs diff=lfs merge=lfs -text
+assets/teaser.png filter=lfs diff=lfs merge=lfs -text
+data_prepare/node-v24.12.0-linux-x64/bin/node filter=lfs diff=lfs merge=lfs -text
+data_prepare/node-v24.12.0-linux-x64/lib/node_modules/npm/node_modules/retry/equation.gif filter=lfs diff=lfs merge=lfs -text
+data_prepare/node-v24.12.0-linux-x64/lib/node_modules/npm/node_modules/qrcode-terminal/example/basic.png filter=lfs diff=lfs merge=lfs -text
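Each added line routes the matching binary file through the Git LFS filter driver instead of storing its content in the repository directly. As a minimal sketch (assuming the git-lfs extension is installed; the glob pattern below is hypothetical), such entries are usually generated rather than hand-written:

# Sketch: generate an LFS tracking rule equivalent to the entries above.
# Assumes `git` and the git-lfs extension are installed and on PATH.
import subprocess

# `git lfs track` appends a matching `filter=lfs diff=lfs merge=lfs -text`
# line to .gitattributes in the current repository.
subprocess.run(['git', 'lfs', 'track', 'assets/*.png'], check=True)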
assets/grpo_csv.png ADDED

Git LFS Details

  • SHA256: 3b668980d3b76ae1a18fde6a001206943761deacd7ea54cb7d8c17bc7235b0d5
  • Pointer size: 131 Bytes
  • Size of remote file: 685 kB
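The "pointer size" above refers to the small text stub Git stores in place of the real file. A minimal sketch of reading such a pointer, assuming the standard three-line LFS pointer format (version, oid, size); the path refers to the tracked file as checked in, not the resolved binary:

# Sketch: parse a Git LFS pointer file into its key/value fields.
from pathlib import Path

def parse_lfs_pointer(path: str) -> dict:
    fields = {}
    for line in Path(path).read_text().splitlines():
        key, _, value = line.partition(' ')
        fields[key] = value
    return fields

# e.g. fields['oid'] -> 'sha256:3b6689...', fields['size'] -> size in bytes as a string
print(parse_lfs_pointer('assets/grpo_csv.png'))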
assets/teaser.png ADDED

Git LFS Details

  • SHA256: e00d1483db55ca7d852a6c601b108b590a2cfc281e7cb83831bac1640e1b6f48
  • Pointer size: 132 Bytes
  • Size of remote file: 3.87 MB
clip_as_service/Dockerfiles/base.Dockerfile ADDED
@@ -0,0 +1,35 @@
+# !!! An ARG declared before a FROM is outside of a build stage, so it can't be used in any instruction after a FROM
+ARG JINA_VERSION=3.11.0
+
+FROM jinaai/jina:${JINA_VERSION}-py38-standard
+
+ARG BACKEND_TAG=torch
+
+# constant, won't invalidate the cache
+LABEL org.opencontainers.image.vendor="Jina AI Limited" \
+      org.opencontainers.image.licenses="Apache 2.0" \
+      org.opencontainers.image.title="CLIP-as-Service" \
+      org.opencontainers.image.description="Embed images and sentences into fixed-length vectors with CLIP" \
+      org.opencontainers.image.authors="hello@jina.ai" \
+      org.opencontainers.image.url="clip-as-service" \
+      org.opencontainers.image.documentation="https://clip-as-service.jina.ai/"
+
+RUN pip3 install --no-cache-dir torch torchvision torchaudio transformers --extra-index-url https://download.pytorch.org/whl/cpu
+
+# COPY will almost always invalidate the cache
+COPY . /cas/
+
+WORKDIR /cas
+
+RUN if [ "${BACKEND_TAG}" != "torch" ]; then python3 -m pip install --no-cache-dir "./[${BACKEND_TAG}]" ; fi \
+    && python3 -m pip install --no-cache-dir .
+
+RUN echo "\
+jtype: CLIPEncoder\n\
+metas:\n\
+  py_modules:\n\
+    - clip_server.executors.clip_$BACKEND_TAG\n\
+" > /tmp/config.yml
+
+ENTRYPOINT ["jina", "executor", "--uses", "/tmp/config.yml", "--timeout-ready", "3000000"]
clip_as_service/Dockerfiles/cuda.Dockerfile ADDED
@@ -0,0 +1,45 @@
+ARG CUDA_VERSION=11.4.2
+
+FROM nvcr.io/nvidia/cuda:${CUDA_VERSION}-cudnn8-runtime-ubuntu20.04
+ENV DEBIAN_FRONTEND=noninteractive
+
+ARG JINA_VERSION=3.11.0
+ARG BACKEND_TAG=torch
+
+# constant, won't invalidate the cache
+LABEL org.opencontainers.image.vendor="Jina AI Limited" \
+      org.opencontainers.image.licenses="Apache 2.0" \
+      org.opencontainers.image.title="CLIP-as-Service" \
+      org.opencontainers.image.description="Embed images and sentences into fixed-length vectors with CLIP" \
+      org.opencontainers.image.authors="hello@jina.ai" \
+      org.opencontainers.image.url="clip-as-service" \
+      org.opencontainers.image.documentation="https://clip-as-service.jina.ai/"
+
+RUN apt-get update && apt-get install -y --no-install-recommends \
+    python3-setuptools python3-wheel python3-pip \
+    && apt-get clean && rm -rf /var/lib/apt/lists/*
+
+RUN python3 -m pip install --default-timeout=1000 --no-cache-dir torch torchvision torchaudio nvidia-pyindex transformers --extra-index-url https://download.pytorch.org/whl/cu113
+RUN python3 -m pip install --default-timeout=1000 --no-cache-dir "jina[standard]==${JINA_VERSION}"
+
+# COPY will almost always invalidate the cache
+COPY . /cas/
+
+WORKDIR /cas
+
+RUN if [ "${BACKEND_TAG}" != "torch" ]; then python3 -m pip install --no-cache-dir "./[${BACKEND_TAG}]" ; fi \
+    && python3 -m pip install --no-cache-dir .
+
+RUN echo "\
+jtype: CLIPEncoder\n\
+metas:\n\
+  py_modules:\n\
+    - clip_server.executors.clip_$BACKEND_TAG\n\
+" > /tmp/config.yml
+
+ENTRYPOINT ["jina", "executor", "--uses", "/tmp/config.yml", "--timeout-ready", "3000000"]
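Running the resulting image requires forwarding a GPU into the container; a sketch, assuming the NVIDIA container toolkit is installed (image tag and port mapping hypothetical):

# Sketch: run the CUDA image with all host GPUs attached.
import subprocess

subprocess.run(
    ['docker', 'run', '--rm', '--gpus', 'all',
     '-p', '51000:51000',            # hypothetical executor port mapping
     'clip-as-service:cuda'],        # hypothetical image tag
    check=True,
)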
clip_as_service/Dockerfiles/server.Dockerfile ADDED
@@ -0,0 +1,50 @@
+ARG CUDA_VERSION=11.6.0
+
+FROM nvidia/cuda:${CUDA_VERSION}-devel-ubuntu20.04
+
+ARG CAS_NAME=cas
+WORKDIR /${CAS_NAME}
+
+ENV PIP_NO_CACHE_DIR=1 \
+    PIP_DISABLE_PIP_VERSION_CHECK=1
+
+# constant, won't invalidate the cache
+LABEL org.opencontainers.image.vendor="Jina AI Limited" \
+      org.opencontainers.image.licenses="Apache 2.0" \
+      org.opencontainers.image.title="CLIP-as-Service" \
+      org.opencontainers.image.description="Embed images and sentences into fixed-length vectors with CLIP" \
+      org.opencontainers.image.authors="hello@jina.ai" \
+      org.opencontainers.image.url="clip-as-service" \
+      org.opencontainers.image.documentation="https://clip-as-service.jina.ai/"
+
+RUN apt-get update \
+    && apt-get install -y --no-install-recommends python3 python3-pip wget \
+    && ln -sf python3 /usr/bin/python \
+    && ln -sf pip3 /usr/bin/pip \
+    && pip install --upgrade pip \
+    && pip install wheel setuptools nvidia-pyindex \
+    && pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu116
+
+COPY server ./server
+# given by the builder
+ARG PIP_TAG
+RUN pip install --default-timeout=1000 --compile ./server/ \
+    && if [ -n "${PIP_TAG}" ]; then pip install --default-timeout=1000 --compile "./server[${PIP_TAG}]" ; fi
+
+ENV LD_LIBRARY_PATH=/usr/local/cuda/lib64
+
+ARG USER_ID=1000
+ARG GROUP_ID=1000
+ARG USER_NAME=${CAS_NAME}
+ARG GROUP_NAME=${CAS_NAME}
+
+RUN groupadd -g ${GROUP_ID} ${USER_NAME} && \
+    useradd -l -u ${USER_ID} -g ${USER_NAME} ${GROUP_NAME} && \
+    mkdir /home/${USER_NAME} && \
+    chown ${USER_NAME}:${GROUP_NAME} /home/${USER_NAME} && \
+    chown -R ${USER_NAME}:${GROUP_NAME} /${CAS_NAME}/
+
+USER ${USER_NAME}
+
+ENTRYPOINT ["python", "-m", "clip_server"]
clip_as_service/Dockerfiles/tensorrt.Dockerfile ADDED
@@ -0,0 +1,38 @@
+# Dockerfile to run CLIP-as-Service with TensorRT and CUDA integration
+
+ARG TENSORRT_VERSION=22.04
+
+FROM nvcr.io/nvidia/tensorrt:${TENSORRT_VERSION}-py3
+
+ARG JINA_VERSION=3.7.0
+ARG BACKEND_TAG=tensorrt
+
+# constant, won't invalidate the cache
+LABEL org.opencontainers.image.vendor="Jina AI Limited" \
+      org.opencontainers.image.licenses="Apache 2.0" \
+      org.opencontainers.image.title="CLIP-as-Service" \
+      org.opencontainers.image.description="Embed images and sentences into fixed-length vectors with CLIP" \
+      org.opencontainers.image.authors="hello@jina.ai" \
+      org.opencontainers.image.url="clip-as-service" \
+      org.opencontainers.image.documentation="https://clip-as-service.jina.ai/"
+
+RUN pip3 install --default-timeout=1000 --no-cache-dir torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu113
+RUN python3 -m pip install --default-timeout=1000 --no-cache-dir "jina[standard]==${JINA_VERSION}"
+
+# COPY will almost always invalidate the cache
+COPY . /cas/
+WORKDIR /cas
+
+RUN python3 -m pip install --no-cache-dir "./[$BACKEND_TAG]"
+
+RUN echo "\
+jtype: CLIPEncoder\n\
+metas:\n\
+  py_modules:\n\
+    - clip_server.executors.clip_$BACKEND_TAG\n\
+" > /tmp/config.yml
+
+ENTRYPOINT ["jina", "executor", "--uses", "/tmp/config.yml"]
clip_as_service/client/clip_client/__init__.py ADDED
@@ -0,0 +1,10 @@
+__version__ = '0.8.4'
+
+import os
+
+from clip_client.client import Client
+
+if 'NO_VERSION_CHECK' not in os.environ:
+    from clip_client.helper import is_latest_version
+
+    is_latest_version(github_repo='clip-as-service')
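As the guard above shows, the PyPI version check can be suppressed by defining `NO_VERSION_CHECK` before the first import; a minimal sketch:

# Sketch: import the client without triggering the version check.
import os

os.environ['NO_VERSION_CHECK'] = '1'  # presence of the key is enough; the value is ignored

from clip_client import Client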
clip_as_service/client/clip_client/client.py ADDED
@@ -0,0 +1,890 @@
+import mimetypes
+import os
+import time
+import warnings
+from typing import (
+    overload,
+    TYPE_CHECKING,
+    Optional,
+    Union,
+    Iterator,
+    Generator,
+    Iterable,
+    Dict,
+)
+from urllib.parse import urlparse
+from functools import partial
+from docarray import DocumentArray
+
+if TYPE_CHECKING:
+    import numpy as np
+    from docarray import Document
+    from jina.clients.base import CallbackFnType
+
+
+class Client:
+    def __init__(self, server: str, credential: dict = {}, **kwargs):
+        """Create a CLIP client object that connects to the CLIP server.
+
+        The server scheme is in the format ``scheme://netloc:port``, where
+            - scheme: one of grpc, websocket, http, grpcs, websockets, https
+            - netloc: the server IP address or hostname
+            - port: the public port of the server
+
+        :param server: the server URI
+        :param credential: the credential for authentication ``{'Authorization': '<token>'}``
+        """
+        try:
+            r = urlparse(server)
+            _port = r.port
+            self._scheme = r.scheme
+        except Exception:
+            raise ValueError(f'{server} is not a valid scheme')
+
+        _tls = False
+        if self._scheme in ('grpcs', 'https', 'wss'):
+            self._scheme = self._scheme[:-1]
+            _tls = True
+
+        if self._scheme == 'ws':
+            self._scheme = 'websocket'  # temp fix for the core
+            if credential:
+                warnings.warn(
+                    'Credential is not supported for websocket, please use grpc or http'
+                )
+
+        if self._scheme in ('grpc', 'http', 'websocket'):
+            _kwargs = dict(host=r.hostname, port=_port, protocol=self._scheme, tls=_tls)
+
+            from jina import Client
+
+            self._client = Client(**_kwargs)
+            self._async_client = Client(**_kwargs, asyncio=True)
+        else:
+            raise ValueError(f'{server} is not a valid scheme')
+
+        self._authorization = credential.get(
+            'Authorization', os.environ.get('CLIP_AUTH_TOKEN')
+        )
+
+    def profile(self, content: Optional[str] = '') -> Dict[str, float]:
+        """Profile a single query's roundtrip, including network and computation latency. The result is summarized in a table.
+
+        :param content: the content to be sent for profiling. By default it sends an empty Document
+            that helps you understand the network latency.
+        :return: the latency report in a dict.
+        """
+        st = time.perf_counter()
+        r = self._client.post(
+            '/', self._iter_doc([content], DocumentArray()), return_responses=True
+        )
+        ed = (time.perf_counter() - st) * 1000
+        route = r[0].routes
+        gateway_time = (
+            route[0].end_time.ToMilliseconds() - route[0].start_time.ToMilliseconds()
+        )
+        clip_time = (
+            route[1].end_time.ToMilliseconds() - route[1].start_time.ToMilliseconds()
+        )
+        network_time = ed - gateway_time
+        server_network = gateway_time - clip_time
+
+        from rich.table import Table
+
+        def make_table(_title, _time, _percent):
+            table = Table(show_header=False, box=None)
+            table.add_row(
+                _title, f'[b]{_time:.0f}[/b]ms', f'[dim]{_percent * 100:.0f}%[/dim]'
+            )
+            return table
+
+        from rich.tree import Tree
+
+        t = Tree(make_table('Roundtrip', ed, 1))
+        t.add(make_table('Client-server network', network_time, network_time / ed))
+        t2 = t.add(make_table('Server', gateway_time, gateway_time / ed))
+        t2.add(
+            make_table(
+                'Gateway-CLIP network', server_network, server_network / gateway_time
+            )
+        )
+        t2.add(make_table('CLIP model', clip_time, clip_time / gateway_time))
+
+        from rich import print
+
+        print(t)
+
+        return {
+            'Roundtrip': ed,
+            'Client-server network': network_time,
+            'Server': gateway_time,
+            'Gateway-CLIP network': server_network,
+            'CLIP model': clip_time,
+        }
+
+    def _update_pbar(self, response, func: Optional['CallbackFnType'] = None):
+        from rich import filesize
+
+        r = response.data.docs
+        if not self._pbar._tasks[self._r_task].started:
+            self._pbar.start_task(self._r_task)
+        self._pbar.update(
+            self._r_task,
+            advance=len(r),
+            total_size=str(
+                filesize.decimal(int(os.environ.get('JINA_GRPC_RECV_BYTES', '0')))
+            ),
+        )
+        if func is not None:
+            func(response)
+
+    def _prepare_streaming(self, disable, total):
+        if total is None:
+            total = 500
+            warnings.warn(
+                'The length of the input is unknown; the progress bar may not be accurate.'
+            )
+        elif total > 500:
+            warnings.warn(
+                'Please ensure all the inputs are valid, otherwise the request will be aborted.'
+            )
+
+        from docarray.array.mixins.io.pbar import get_pbar
+
+        self._pbar = get_pbar(disable)
+
+        os.environ['JINA_GRPC_SEND_BYTES'] = '0'
+        os.environ['JINA_GRPC_RECV_BYTES'] = '0'
+
+        self._r_task = self._pbar.add_task(
+            ':arrow_down: Progress', total=total, total_size=0, start=False
+        )
+
+    @staticmethod
+    def _gather_result(
+        response, results: 'DocumentArray', attribute: Optional[str] = None
+    ):
+        r = response.data.docs
+        if attribute:
+            results[r[:, 'id']][:, attribute] = r[:, attribute]
+
+    def _iter_doc(
+        self, content, results: Optional['DocumentArray'] = None
+    ) -> Generator['Document', None, None]:
+        from docarray import Document
+
+        for c in content:
+            if isinstance(c, str):
+                _mime = mimetypes.guess_type(c)[0]
+                if _mime and _mime.startswith('image'):
+                    d = Document(
+                        uri=c,
+                    ).load_uri_to_blob()
+                else:
+                    d = Document(text=c)
+            elif isinstance(c, Document):
+                if c.content_type in ('text', 'blob'):
+                    d = c
+                elif not c.blob and c.uri:
+                    c.load_uri_to_blob()
+                    d = c
+                elif c.tensor is not None:
+                    d = c
+                else:
+                    raise TypeError(f'unsupported input type {c!r} {c.content_type}')
+            else:
+                raise TypeError(f'unsupported input type {c!r}')
+
+            if results is not None:
+                results.append(d)
+            yield d
+
+    def _get_post_payload(
+        self, content, results: Optional['DocumentArray'] = None, **kwargs
+    ):
+        payload = dict(
+            inputs=self._iter_doc(content, results),
+            request_size=kwargs.get('batch_size', 8),
+            total_docs=len(content) if hasattr(content, '__len__') else None,
+        )
+
+        if self._scheme == 'grpc' and self._authorization:
+            payload.update(metadata=(('authorization', self._authorization),))
+        elif self._scheme == 'http' and self._authorization:
+            payload.update(headers={'Authorization': self._authorization})
+        return payload
+
+    @staticmethod
+    def _unboxed_result(results: Optional['DocumentArray'] = None, unbox: bool = False):
+        if results is not None:
+            if results.embeddings is None:
+                raise ValueError(
+                    'Empty embedding returned from the server. '
+                    'This is often due to a misconfiguration of the server; '
+                    'restarting the server or changing the serving port number often solves the problem.'
+                )
+            return results.embeddings if unbox else results
+
+    @overload
+    def encode(
+        self,
+        content: Iterable[str],
+        *,
+        batch_size: Optional[int] = None,
+        show_progress: bool = False,
+        parameters: Optional[dict] = None,
+        on_done: Optional['CallbackFnType'] = None,
+        on_error: Optional['CallbackFnType'] = None,
+        on_always: Optional['CallbackFnType'] = None,
+        prefetch: int = 100,
+    ) -> 'np.ndarray':
+        """Encode images and texts into embeddings where the input is an iterable of raw strings.
+
+        Each image and text must be represented as a string. The following strings are acceptable:
+            - a local image filepath, treated as an image
+            - a remote image http/https URL, treated as an image
+            - a data URI, treated as an image
+            - plain text, treated as a sentence
+
+        :param content: an iterator of image URIs or sentences; each element is an image or a text sentence as a string.
+        :param batch_size: the number of elements in each request when sending ``content``
+        :param show_progress: if set, show a progress bar
+        :param parameters: the parameters for the encoding; you can specify the model to use when you have multiple models
+        :param on_done: the callback function executed while streaming, after successful completion of each request.
+            It takes the response ``DataRequest`` as the only argument
+        :param on_error: the callback function executed while streaming, after failed completion of each request.
+            It takes the response ``DataRequest`` as the only argument
+        :param on_always: the callback function executed while streaming, after completion of each request.
+            It takes the response ``DataRequest`` as the only argument
+        :param prefetch: the number of in-flight batches made by the post() method. Use a lower value for expensive
+            operations, and a higher value for faster response times
+        :return: the embeddings in a numpy ndarray with shape ``[N, D]``, where ``N`` is the length of ``content``
+        """
+        ...
+
+    @overload
+    def encode(
+        self,
+        content: Union['DocumentArray', Iterable['Document']],
+        *,
+        batch_size: Optional[int] = None,
+        show_progress: bool = False,
+        parameters: Optional[dict] = None,
+        on_done: Optional['CallbackFnType'] = None,
+        on_error: Optional['CallbackFnType'] = None,
+        on_always: Optional['CallbackFnType'] = None,
+        prefetch: int = 100,
+    ) -> 'DocumentArray':
+        """Encode images and texts into embeddings where the input is an iterable of :class:`docarray.Document`.
+
+        :param content: an iterable of :class:`docarray.Document`; each Document must be filled with ``.uri``, ``.text`` or ``.blob``.
+        :param batch_size: the number of elements in each request when sending ``content``
+        :param show_progress: if set, show a progress bar
+        :param parameters: the parameters for the encoding; you can specify the model to use when you have multiple models
+        :param on_done: the callback function executed while streaming, after successful completion of each request.
+            It takes the response ``DataRequest`` as the only argument
+        :param on_error: the callback function executed while streaming, after failed completion of each request.
+            It takes the response ``DataRequest`` as the only argument
+        :param on_always: the callback function executed while streaming, after completion of each request.
+            It takes the response ``DataRequest`` as the only argument
+        :param prefetch: the number of in-flight batches made by the post() method. Use a lower value for expensive
+            operations, and a higher value for faster response times
+        :return: the embedded Documents in a DocumentArray, with the ``.embedding`` attribute filled
+        """
+        ...
+
+    def encode(self, content, **kwargs):
+        if isinstance(content, str):
+            raise TypeError(
+                f'Content must be an Iterable of [str, Document], try `.encode(["{content}"])` instead'
+            )
+        if hasattr(content, '__len__') and len(content) == 0:
+            return DocumentArray() if isinstance(content, DocumentArray) else []
+
+        self._prepare_streaming(
+            not kwargs.get('show_progress'),
+            total=len(content) if hasattr(content, '__len__') else None,
+        )
+        on_done = kwargs.pop('on_done', None)
+        on_error = kwargs.pop('on_error', None)
+        on_always = kwargs.pop('on_always', None)
+        prefetch = kwargs.pop('prefetch', 100)
+        results = DocumentArray() if not on_done and not on_always else None
+        if not on_done:
+            on_done = partial(
+                self._gather_result, results=results, attribute='embedding'
+            )
+
+        with self._pbar:
+            parameters = kwargs.pop('parameters', {})
+            parameters['drop_image_content'] = parameters.get(
+                'drop_image_content', True
+            )
+            model_name = parameters.pop('model_name', '') if parameters else ''
+
+            self._client.post(
+                on=f'/encode/{model_name}'.rstrip('/'),
+                **self._get_post_payload(content, results, **kwargs),
+                on_done=on_done,
+                on_error=on_error,
+                on_always=partial(self._update_pbar, func=on_always),
+                parameters=parameters,
+                prefetch=prefetch,
+            )
+
+        unbox = hasattr(content, '__len__') and isinstance(content[0], str)
+        return self._unboxed_result(results, unbox)
+
+    @overload
+    async def aencode(
+        self,
+        content: Iterator[str],
+        *,
+        batch_size: Optional[int] = None,
+        show_progress: bool = False,
+        parameters: Optional[dict] = None,
+        on_done: Optional['CallbackFnType'] = None,
+        on_error: Optional['CallbackFnType'] = None,
+        on_always: Optional['CallbackFnType'] = None,
+        prefetch: int = 100,
+    ) -> 'np.ndarray':
+        ...
+
+    @overload
+    async def aencode(
+        self,
+        content: Union['DocumentArray', Iterable['Document']],
+        *,
+        batch_size: Optional[int] = None,
+        show_progress: bool = False,
+        parameters: Optional[dict] = None,
+        on_done: Optional['CallbackFnType'] = None,
+        on_error: Optional['CallbackFnType'] = None,
+        on_always: Optional['CallbackFnType'] = None,
+        prefetch: int = 100,
+    ) -> 'DocumentArray':
+        ...
+
+    async def aencode(self, content, **kwargs):
+        if isinstance(content, str):
+            raise TypeError(
+                f'Content must be an Iterable of [str, Document], try `.aencode(["{content}"])` instead'
+            )
+        if hasattr(content, '__len__') and len(content) == 0:
+            return DocumentArray() if isinstance(content, DocumentArray) else []
+
+        self._prepare_streaming(
+            not kwargs.get('show_progress'),
+            total=len(content) if hasattr(content, '__len__') else None,
+        )
+        on_done = kwargs.pop('on_done', None)
+        on_error = kwargs.pop('on_error', None)
+        on_always = kwargs.pop('on_always', None)
+        prefetch = kwargs.pop('prefetch', 100)
+        results = DocumentArray() if not on_done and not on_always else None
+        if not on_done:
+            on_done = partial(
+                self._gather_result, results=results, attribute='embedding'
+            )
+
+        with self._pbar:
+            parameters = kwargs.pop('parameters', {})
+            parameters['drop_image_content'] = parameters.get(
+                'drop_image_content', True
+            )
+            model_name = parameters.get('model_name', '') if parameters else ''
+
+            async for _ in self._async_client.post(
+                on=f'/encode/{model_name}'.rstrip('/'),
+                **self._get_post_payload(content, results, **kwargs),
+                on_done=on_done,
+                on_error=on_error,
+                on_always=partial(self._update_pbar, func=on_always),
+                parameters=parameters,
+                prefetch=prefetch,
+            ):
+                continue
+
+        unbox = hasattr(content, '__len__') and isinstance(content[0], str)
+        return self._unboxed_result(results, unbox)
+
+    def _iter_rank_docs(
+        self, content, results: Optional['DocumentArray'] = None, source='matches'
+    ) -> Generator['Document', None, None]:
+        from docarray import Document
+
+        for c in content:
+            if isinstance(c, Document):
+                d = self._prepare_rank_doc(c, source)
+            else:
+                raise TypeError(f'Unsupported input type {c!r}')
+            if results is not None:
+                results.append(d)
+            yield d
+
+    def _get_rank_payload(
+        self, content, results: Optional['DocumentArray'] = None, **kwargs
+    ):
+        payload = dict(
+            inputs=self._iter_rank_docs(
+                content, results, source=kwargs.get('source', 'matches')
+            ),
+            request_size=kwargs.get('batch_size', 8),
+            total_docs=len(content) if hasattr(content, '__len__') else None,
+        )
+        if self._scheme == 'grpc' and self._authorization:
+            payload.update(metadata=(('authorization', self._authorization),))
+        elif self._scheme == 'http' and self._authorization:
+            payload.update(headers={'Authorization': self._authorization})
+        return payload
+
+    @staticmethod
+    def _prepare_single_doc(d: 'Document'):
+        if d.content_type in ('text', 'blob'):
+            return d
+        elif not d.blob and d.uri:
+            d.load_uri_to_blob()
+            return d
+        elif d.tensor is not None:
+            return d
+        else:
+            raise TypeError(f'Unsupported input type {d!r} {d.content_type}')
+
+    @staticmethod
+    def _prepare_rank_doc(d: 'Document', _source: str = 'matches'):
+        _get = lambda d: getattr(d, _source)
+        if not _get(d):
+            raise ValueError(f'`.rank()` requires every doc to have `.{_source}`')
+        d = Client._prepare_single_doc(d)
+        setattr(d, _source, [Client._prepare_single_doc(c) for c in _get(d)])
+        return d
+
+    def rank(
+        self, docs: Union['DocumentArray', Iterable['Document']], **kwargs
+    ) -> 'DocumentArray':
+        """Rank image-text matches according to the server-side CLIP model.
+
+        Given a Document with nested matches, where the root is an image or text and the matches are in the other
+        modality, this method ranks the matches according to the CLIP model.
+        Each match receives a new score under ``clip_score``, and matches are sorted in descending order by this score.
+        More details can be found in: https://github.com/openai/CLIP#usage
+
+        :param docs: the input Documents
+        :return: the ranked Documents in a DocumentArray.
+        """
+        if isinstance(docs, str):
+            raise TypeError('Content must be an Iterable of [Document]')
+
+        self._prepare_streaming(
+            not kwargs.get('show_progress'),
+            total=len(docs) if hasattr(docs, '__len__') else None,
+        )
+
+        on_done = kwargs.pop('on_done', None)
+        on_error = kwargs.pop('on_error', None)
+        on_always = kwargs.pop('on_always', None)
+        prefetch = kwargs.pop('prefetch', 100)
+        results = DocumentArray() if not on_done and not on_always else None
+        if not on_done:
+            on_done = partial(self._gather_result, results=results, attribute='matches')
+
+        with self._pbar:
+            parameters = kwargs.pop('parameters', {})
+            parameters['drop_image_content'] = parameters.get(
+                'drop_image_content', True
+            )
+            model_name = parameters.get('model_name', '') if parameters else ''
+
+            self._client.post(
+                on=f'/rank/{model_name}'.rstrip('/'),
+                **self._get_rank_payload(docs, results, **kwargs),
+                on_done=on_done,
+                on_error=on_error,
+                on_always=partial(self._update_pbar, func=on_always),
+                parameters=parameters,
+                prefetch=prefetch,
+            )
+
+        return results
+
+    async def arank(
+        self, docs: Union['DocumentArray', Iterable['Document']], **kwargs
+    ) -> 'DocumentArray':
+        if isinstance(docs, str):
+            raise TypeError('Content must be an Iterable of [Document]')
+
+        self._prepare_streaming(
+            not kwargs.get('show_progress'),
+            total=len(docs) if hasattr(docs, '__len__') else None,
+        )
+        on_done = kwargs.pop('on_done', None)
+        on_error = kwargs.pop('on_error', None)
+        on_always = kwargs.pop('on_always', None)
+        prefetch = kwargs.pop('prefetch', 100)
+        results = DocumentArray() if not on_done and not on_always else None
+        if not on_done:
+            on_done = partial(self._gather_result, results=results, attribute='matches')
+
+        with self._pbar:
+            parameters = kwargs.pop('parameters', {})
+            parameters['drop_image_content'] = parameters.get(
+                'drop_image_content', True
+            )
+            model_name = parameters.get('model_name', '') if parameters else ''
+
+            async for _ in self._async_client.post(
+                on=f'/rank/{model_name}'.rstrip('/'),
+                **self._get_rank_payload(docs, results, **kwargs),
+                on_done=on_done,
+                on_error=on_error,
+                on_always=partial(self._update_pbar, func=on_always),
+                parameters=parameters,
+                prefetch=prefetch,
+            ):
+                continue
+
+        return results
+
+    @overload
+    def index(
+        self,
+        content: Iterable[str],
+        *,
+        batch_size: Optional[int] = None,
+        show_progress: bool = False,
+        parameters: Optional[Dict] = None,
+        on_done: Optional['CallbackFnType'] = None,
+        on_error: Optional['CallbackFnType'] = None,
+        on_always: Optional['CallbackFnType'] = None,
+        prefetch: int = 100,
+    ):
+        """Index the images or texts whose embeddings are computed by the server-side CLIP model.
+
+        Each image and text must be represented as a string. The following strings are acceptable:
+            - a local image filepath, treated as an image
+            - a remote image http/https URL, treated as an image
+            - a data URI, treated as an image
+            - plain text, treated as a sentence
+
+        :param content: an iterator of image URIs or sentences; each element is an image or a text sentence as a string.
+        :param batch_size: the number of elements in each request when sending ``content``
+        :param show_progress: if set, show a progress bar
+        :param parameters: the parameters for the indexing; you can specify the model to use when you have multiple models
+        :param on_done: the callback function executed while streaming, after successful completion of each request.
+            It takes the response ``DataRequest`` as the only argument
+        :param on_error: the callback function executed while streaming, after an error occurs in each request.
+            It takes the response ``DataRequest`` as the only argument
+        :param on_always: the callback function executed while streaming, after each request is completed.
+            It takes the response ``DataRequest`` as the only argument
+        :param prefetch: the number of in-flight batches made by the post() method. Use a lower value for expensive
+            operations, and a higher value for faster response times
+        :return: the indexed Documents in a DocumentArray
+        """
+        ...
+
+    @overload
+    def index(
+        self,
+        content: Union['DocumentArray', Iterable['Document']],
+        *,
+        batch_size: Optional[int] = None,
+        show_progress: bool = False,
+        parameters: Optional[dict] = None,
+        on_done: Optional['CallbackFnType'] = None,
+        on_error: Optional['CallbackFnType'] = None,
+        on_always: Optional['CallbackFnType'] = None,
+        prefetch: int = 100,
+    ) -> 'DocumentArray':
+        """Index the images or texts whose embeddings are computed by the server-side CLIP model.
+
+        :param content: an iterable of :class:`docarray.Document`; each Document must be filled with ``.uri``, ``.text`` or ``.blob``.
+        :param batch_size: the number of elements in each request when sending ``content``
+        :param show_progress: if set, show a progress bar
+        :param parameters: the parameters for the indexing; you can specify the model to use when you have multiple models
+        :param on_done: the callback function executed while streaming, after successful completion of each request.
+            It takes the response ``DataRequest`` as the only argument
+        :param on_error: the callback function executed while streaming, after an error occurs in each request.
+            It takes the response ``DataRequest`` as the only argument
+        :param on_always: the callback function executed while streaming, after each request is completed.
+            It takes the response ``DataRequest`` as the only argument
+        :param prefetch: the number of in-flight batches made by the post() method. Use a lower value for expensive
+            operations, and a higher value for faster response times
+        :return: the indexed Documents in a DocumentArray
+        """
+        ...
+
+    def index(self, content, **kwargs):
+        if isinstance(content, str):
+            raise TypeError(
+                f'content must be an Iterable of [str, Document], try `.index(["{content}"])` instead'
+            )
+
+        self._prepare_streaming(
+            not kwargs.get('show_progress'),
+            total=len(content) if hasattr(content, '__len__') else None,
+        )
+        on_done = kwargs.pop('on_done', None)
+        on_error = kwargs.pop('on_error', None)
+        on_always = kwargs.pop('on_always', None)
+        prefetch = kwargs.pop('prefetch', 100)
+        results = DocumentArray() if not on_done and not on_always else None
+        if not on_done:
+            on_done = partial(
+                self._gather_result, results=results, attribute='embedding'
+            )
+
+        with self._pbar:
+            parameters = kwargs.pop('parameters', {})
+            parameters['drop_image_content'] = parameters.get(
+                'drop_image_content', True
+            )
+
+            self._client.post(
+                on='/index',
+                **self._get_post_payload(content, results, **kwargs),
+                on_done=on_done,
+                on_error=on_error,
+                on_always=partial(self._update_pbar, func=on_always),
+                parameters=parameters,
+                prefetch=prefetch,
+            )
+
+        return results
+
+    @overload
+    async def aindex(
+        self,
+        content: Iterator[str],
+        *,
+        batch_size: Optional[int] = None,
+        show_progress: bool = False,
+        parameters: Optional[Dict] = None,
+        on_done: Optional['CallbackFnType'] = None,
+        on_error: Optional['CallbackFnType'] = None,
+        on_always: Optional['CallbackFnType'] = None,
+        prefetch: int = 100,
+    ):
+        ...
+
+    @overload
+    async def aindex(
+        self,
+        content: Union['DocumentArray', Iterable['Document']],
+        *,
+        batch_size: Optional[int] = None,
+        show_progress: bool = False,
+        parameters: Optional[dict] = None,
+        on_done: Optional['CallbackFnType'] = None,
+        on_error: Optional['CallbackFnType'] = None,
+        on_always: Optional['CallbackFnType'] = None,
+        prefetch: int = 100,
+    ):
+        ...
+
+    async def aindex(self, content, **kwargs):
+        if isinstance(content, str):
+            raise TypeError(
+                f'content must be an Iterable of [str, Document], try `.aindex(["{content}"])` instead'
+            )
+
+        self._prepare_streaming(
+            not kwargs.get('show_progress'),
+            total=len(content) if hasattr(content, '__len__') else None,
+        )
+        on_done = kwargs.pop('on_done', None)
+        on_error = kwargs.pop('on_error', None)
+        on_always = kwargs.pop('on_always', None)
+        prefetch = kwargs.pop('prefetch', 100)
+        results = DocumentArray() if not on_done and not on_always else None
+        if not on_done:
+            on_done = partial(
+                self._gather_result, results=results, attribute='embedding'
+            )
+
+        with self._pbar:
+            parameters = kwargs.pop('parameters', {})
+            parameters['drop_image_content'] = parameters.get(
+                'drop_image_content', True
+            )
+
+            async for _ in self._async_client.post(
+                on='/index',
+                **self._get_post_payload(content, results, **kwargs),
+                on_done=on_done,
+                on_error=on_error,
+                on_always=partial(self._update_pbar, func=on_always),
+                parameters=parameters,
+                prefetch=prefetch,
+            ):
+                continue
+
+        return results
+
+    @overload
+    def search(
+        self,
+        content: Iterable[str],
+        *,
+        limit: int = 10,
+        batch_size: Optional[int] = None,
+        show_progress: bool = False,
+        parameters: Optional[Dict] = None,
+        on_done: Optional['CallbackFnType'] = None,
+        on_error: Optional['CallbackFnType'] = None,
+        on_always: Optional['CallbackFnType'] = None,
+        prefetch: int = 100,
+    ) -> 'DocumentArray':
+        """Search for the top-k results for the given queries.
+
+        If an input is a string, it is used as the query; if it is a ``Document``,
+        that ``Document`` is used as the query.
+
+        :param content: the list of queries.
+        :param limit: the number of results to return.
+        :param batch_size: the number of elements in each request when sending ``content``.
+        :param show_progress: if set, show a progress bar.
+        :param parameters: the parameters passed to the search function.
+        :param on_done: the callback function executed while streaming, after successful completion of each request.
+            It takes the response ``DataRequest`` as the only argument
+        :param on_error: the callback function executed while streaming, after an error occurs in each request.
+            It takes the response ``DataRequest`` as the only argument
+        :param on_always: the callback function executed while streaming, after each request is completed.
+            It takes the response ``DataRequest`` as the only argument
+        :param prefetch: the number of in-flight batches made by the post() method. Use a lower value for expensive
+            operations, and a higher value for faster response times
+        """
+        ...
+
+    @overload
+    def search(
+        self,
+        content: Union['DocumentArray', Iterable['Document']],
+        *,
+        limit: int = 10,
+        batch_size: Optional[int] = None,
+        show_progress: bool = False,
+        parameters: Optional[dict] = None,
+        on_done: Optional['CallbackFnType'] = None,
+        on_error: Optional['CallbackFnType'] = None,
+        on_always: Optional['CallbackFnType'] = None,
+        prefetch: int = 100,
+    ) -> 'DocumentArray':
+        """Search for the top-k results for the given queries.
+
+        If an input is a string, it is used as the query; if it is a ``Document``,
+        that ``Document`` is used as the query.
+
+        :param content: the list of queries.
+        :param limit: the number of results to return.
+        :param batch_size: the number of elements in each request when sending ``content``.
+        :param show_progress: if set, show a progress bar.
+        :param parameters: the parameters passed to the search function.
+        :param on_done: the callback function executed while streaming, after successful completion of each request.
+            It takes the response ``DataRequest`` as the only argument
+        :param on_error: the callback function executed while streaming, after an error occurs in each request.
+            It takes the response ``DataRequest`` as the only argument
+        :param on_always: the callback function executed while streaming, after each request is completed.
+            It takes the response ``DataRequest`` as the only argument
+        :param prefetch: the number of in-flight batches made by the post() method. Use a lower value for expensive
+            operations, and a higher value for faster response times
+        """
+        ...
+
+    def search(self, content, limit: int = 10, **kwargs) -> 'DocumentArray':
+        if isinstance(content, str):
+            raise TypeError(
+                f'content must be an Iterable of [str, Document], try `.search(["{content}"])` instead'
+            )
+
+        self._prepare_streaming(
+            not kwargs.get('show_progress'),
+            total=len(content) if hasattr(content, '__len__') else None,
+        )
+        on_done = kwargs.pop('on_done', None)
+        on_error = kwargs.pop('on_error', None)
+        on_always = kwargs.pop('on_always', None)
+        prefetch = kwargs.pop('prefetch', 100)
+        results = DocumentArray() if not on_done and not on_always else None
+        if not on_done:
+            on_done = partial(self._gather_result, results=results, attribute='matches')
+
+        with self._pbar:
+            parameters = kwargs.pop('parameters', {})
+            parameters['limit'] = limit
+            parameters['drop_image_content'] = parameters.get(
+                'drop_image_content', True
+            )
+
+            self._client.post(
+                on='/search',
+                **self._get_post_payload(content, results, **kwargs),
+                on_done=on_done,
+                on_error=on_error,
+                on_always=partial(self._update_pbar, func=on_always),
+                parameters=parameters,
+                prefetch=prefetch,
+            )
+
+        return results
+
+    @overload
+    async def asearch(
+        self,
+        content: Iterator[str],
+        *,
+        limit: int = 10,
+        batch_size: Optional[int] = None,
+        show_progress: bool = False,
+        parameters: Optional[Dict] = None,
+        on_done: Optional['CallbackFnType'] = None,
+        on_error: Optional['CallbackFnType'] = None,
+        on_always: Optional['CallbackFnType'] = None,
+        prefetch: int = 100,
+    ):
+        ...
+
+    @overload
+    async def asearch(
+        self,
+        content: Union['DocumentArray', Iterable['Document']],
+        *,
+        limit: int = 10,
+        batch_size: Optional[int] = None,
+        show_progress: bool = False,
+        parameters: Optional[dict] = None,
+        on_done: Optional['CallbackFnType'] = None,
+        on_error: Optional['CallbackFnType'] = None,
+        on_always: Optional['CallbackFnType'] = None,
+        prefetch: int = 100,
+    ):
+        ...
+
+    async def asearch(self, content, limit: int = 10, **kwargs):
+        if isinstance(content, str):
+            raise TypeError(
+                f'content must be an Iterable of [str, Document], try `.asearch(["{content}"])` instead'
+            )
+
+        self._prepare_streaming(
+            not kwargs.get('show_progress'),
+            total=len(content) if hasattr(content, '__len__') else None,
+        )
+        on_done = kwargs.pop('on_done', None)
+        on_error = kwargs.pop('on_error', None)
+        on_always = kwargs.pop('on_always', None)
+        prefetch = kwargs.pop('prefetch', 100)
+        results = DocumentArray() if not on_done and not on_always else None
+        if not on_done:
+            on_done = partial(self._gather_result, results=results, attribute='matches')
+
+        with self._pbar:
+            parameters = kwargs.pop('parameters', {})
+            parameters['limit'] = limit
+            parameters['drop_image_content'] = parameters.get(
+                'drop_image_content', True
+            )
+
+            async for _ in self._async_client.post(
+                on='/search',
+                **self._get_post_payload(content, results, **kwargs),
+                on_done=on_done,
+                on_error=on_error,
+                on_always=partial(self._update_pbar, func=on_always),
+                parameters=parameters,
+                prefetch=prefetch,
+            ):
+                continue
+
+        return results
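Putting the class above to work is only a few lines; a minimal sketch, assuming a clip-server instance is reachable at the (hypothetical) address below:

# Sketch: connect, profile the roundtrip, and encode mixed text/image inputs.
from clip_client import Client

c = Client('grpc://0.0.0.0:51000')   # hypothetical server address

c.profile()                          # prints a latency tree and returns a dict

# string inputs are "unboxed": the result is a numpy ndarray of shape [N, D]
emb = c.encode(['a photo of a dog', 'https://example.com/cat.jpg'])  # hypothetical image URL
print(emb.shape)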
clip_as_service/client/clip_client/helper.py ADDED
@@ -0,0 +1,54 @@
+import json
+import sys
+import threading
+from packaging.version import Version
+from urllib.request import Request, urlopen
+
+import pkg_resources
+from rich import print
+from rich.panel import Panel
+
+
+def _version_check(package: str = None, github_repo: str = None):
+    try:
+        if not package:
+            package = vars(sys.modules[__name__])['__package__']
+        if not github_repo:
+            github_repo = package
+
+        cur_ver = Version(pkg_resources.get_distribution(package).version)
+        req = Request(
+            f'https://pypi.python.org/pypi/{package}/json',
+            headers={'User-Agent': 'Mozilla/5.0'},
+        )
+        with urlopen(
+            req, timeout=1
+        ) as resp:  # 'with' is important to close the resource after use
+            j = json.load(resp)
+            releases = j.get('releases', {})
+            latest_release_ver = max(
+                Version(v) for v in releases.keys() if '.dev' not in v
+            )
+            if cur_ver < latest_release_ver:
+                print(
+                    Panel(
+                        f'You are using [b]{package} {cur_ver}[/b], but [bold green]{latest_release_ver}[/] is available. '
+                        f'You may upgrade it via [b]pip install -U {package}[/b]. [link=https://github.com/jina-ai/{github_repo}/releases]Read the changelog here[/link].',
+                        title=':new: New version available!',
+                        width=50,
+                    )
+                )
+    except Exception:
+        # no network, too slow, or PyPI is down
+        pass
+
+
+def is_latest_version(package: str = None, github_repo: str = None) -> None:
+    """Check whether a newer version is available on PyPI; set the env var `NO_VERSION_CHECK` to disable the check.
+
+    :param package: the package name; if None, it is auto-detected
+    :param github_repo: the repo name that contains the CHANGELOG; if None, the same as the package name
+    """
+    threading.Thread(target=_version_check, args=(package, github_repo)).start()
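The comparison in `_version_check` relies on `packaging.version.Version` ordering rather than string comparison; a small self-contained sketch of the same filter-and-max step (the release list is hypothetical):

# Sketch: pick the latest non-dev release, mirroring _version_check above.
from packaging.version import Version

releases = ['0.8.3', '0.8.4', '0.9.0.dev1', '0.9.0']  # hypothetical release list
latest = max(Version(v) for v in releases if '.dev' not in v)
assert latest == Version('0.9.0')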
clip_as_service/client/setup.py ADDED
@@ -0,0 +1,93 @@
+import sys
+from os import path
+
+from setuptools import find_packages
+from setuptools import setup
+
+if sys.version_info < (3, 7, 0):
+    raise OSError(f'CLIP-as-service requires Python >=3.7, but yours is {sys.version}')
+
+try:
+    pkg_name = 'clip-client'
+    libinfo_py = path.join(
+        path.dirname(__file__), pkg_name.replace('-', '_'), '__init__.py'
+    )
+    libinfo_content = open(libinfo_py, 'r', encoding='utf8').readlines()
+    version_line = [l.strip() for l in libinfo_content if l.startswith('__version__')][
+        0
+    ]
+    exec(version_line)  # gives __version__
+except FileNotFoundError:
+    __version__ = '0.0.0'
+
+try:
+    with open('../README.md', encoding='utf8') as fp:
+        _long_description = fp.read()
+except FileNotFoundError:
+    _long_description = ''
+
+setup(
+    name=pkg_name,
+    packages=find_packages(),
+    version=__version__,
+    include_package_data=True,
+    description='Embed images and sentences into fixed-length vectors via CLIP',
+    author='Jina AI',
+    author_email='hello@jina.ai',
+    license='Apache 2.0',
+    url='https://github.com/jina-ai/clip-as-service',
+    download_url='https://github.com/jina-ai/clip-as-service/tags',
+    long_description=_long_description,
+    long_description_content_type='text/markdown',
+    zip_safe=False,
+    setup_requires=['setuptools>=18.0', 'wheel'],
+    install_requires=[
+        'jina>=3.12.0',
+        'docarray[common]>=0.19.0,<0.30.0',
+        'packaging',
+    ],
+    extras_require={
+        'test': [
+            'pytest',
+            'pytest-timeout',
+            'pytest-mock',
+            'pytest-asyncio',
+            'pytest-cov',
+            'pytest-repeat',
+            'pytest-reraise',
+            'mock',
+            'pytest-custom_exit_code',
+            'black',
+        ],
+    },
+    classifiers=[
+        'Development Status :: 5 - Production/Stable',
+        'Intended Audience :: Developers',
+        'Intended Audience :: Education',
+        'Intended Audience :: Science/Research',
+        'Programming Language :: Python :: 3.7',
+        'Programming Language :: Python :: 3.8',
+        'Programming Language :: Python :: 3.9',
+        'Programming Language :: Python :: 3.10',
+        'Programming Language :: Unix Shell',
+        'Environment :: Console',
+        'License :: OSI Approved :: Apache Software License',
+        'Operating System :: OS Independent',
+        'Topic :: Database :: Database Engines/Servers',
+        'Topic :: Scientific/Engineering :: Artificial Intelligence',
+        'Topic :: Internet :: WWW/HTTP :: Indexing/Search',
+        'Topic :: Scientific/Engineering :: Image Recognition',
+        'Topic :: Multimedia :: Video',
+        'Topic :: Scientific/Engineering',
+        'Topic :: Scientific/Engineering :: Mathematics',
+        'Topic :: Software Development',
+        'Topic :: Software Development :: Libraries',
+        'Topic :: Software Development :: Libraries :: Python Modules',
+    ],
+    project_urls={
+        'Documentation': 'https://clip-as-service.jina.ai',
+        'Source': 'https://github.com/jina-ai/clip-as-service/',
+        'Tracker': 'https://github.com/jina-ai/clip-as-service/issues',
+    },
+    keywords='jina openai clip deep-learning cross-modal multi-modal neural-search',
+)
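Given the `extras_require` above, the test dependencies install alongside the package; a sketch, assuming the package is published under the declared name `clip-client`:

# Sketch: install the client with its test extras via pip.
import subprocess
import sys

subprocess.run(
    [sys.executable, '-m', 'pip', 'install', 'clip-client[test]'],
    check=True,
)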
clip_as_service/docs/Makefile ADDED
@@ -0,0 +1,19 @@
+# Minimal makefile for Sphinx documentation
+# Used only for local building
+
+# You can set these variables from the command line.
+SPHINXOPTS    =
+SPHINXBUILD   = sphinx-build
+SOURCEDIR     = .
+BUILDDIR      = _build
+
+# Put it first so that "make" without argument is like "make help".
+help:
+	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
+
+.PHONY: help Makefile
+
+# Catch-all target: route all unknown targets to Sphinx using the new
+# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
+%: Makefile
+	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
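The catch-all `%: Makefile` rule forwards any target name to sphinx-build's make mode, so `make html` expands to the call sketched below (working directory hypothetical):

# Sketch: what `make html` runs under the hood, per the catch-all rule above.
import subprocess

subprocess.run(
    ['sphinx-build', '-M', 'html', '.', '_build'],
    cwd='clip_as_service/docs',  # hypothetical working directory
    check=True,
)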
clip_as_service/docs/_static/JCloud-dark.svg ADDED
clip_as_service/docs/_static/JCloud-light.svg ADDED
clip_as_service/docs/_static/banner.png ADDED

Git LFS Details

  • SHA256: b8ca01efc63c53cee1f9af55a59ea5429c8a2620be2b006aafab00b182f5f115
  • Pointer size: 131 Bytes
  • Size of remote file: 629 kB
clip_as_service/docs/_static/cas-dark.svg ADDED
clip_as_service/docs/_static/cas-grafana.json ADDED
@@ -0,0 +1,900 @@
1
+ {
2
+ "__inputs": [
3
+ {
4
+ "name": "DS_PROMETHEUS",
5
+ "label": "Prometheus",
6
+ "description": "",
7
+ "type": "datasource",
8
+ "pluginId": "prometheus",
9
+ "pluginName": "Prometheus"
10
+ }
11
+ ],
12
+ "__elements": [],
13
+ "__requires": [
14
+ {
15
+ "type": "grafana",
16
+ "id": "grafana",
17
+ "name": "Grafana",
18
+ "version": "8.5.3"
19
+ },
20
+ {
21
+ "type": "panel",
22
+ "id": "piechart",
23
+ "name": "Pie chart",
24
+ "version": ""
25
+ },
26
+ {
27
+ "type": "datasource",
28
+ "id": "prometheus",
29
+ "name": "Prometheus",
30
+ "version": "1.0.0"
31
+ },
32
+ {
33
+ "type": "panel",
34
+ "id": "stat",
35
+ "name": "Stat",
36
+ "version": ""
37
+ },
38
+ {
39
+ "type": "panel",
40
+ "id": "timeseries",
41
+ "name": "Time series",
42
+ "version": ""
43
+ }
44
+ ],
45
+ "annotations": {
46
+ "list": [
47
+ {
48
+ "builtIn": 1,
49
+ "datasource": {
50
+ "type": "datasource",
51
+ "uid": "grafana"
52
+ },
53
+ "enable": true,
54
+ "hide": true,
55
+ "iconColor": "rgba(0, 211, 255, 1)",
56
+ "name": "Annotations & Alerts",
57
+ "target": {
58
+ "limit": 100,
59
+ "matchAny": false,
60
+ "tags": [],
61
+ "type": "dashboard"
62
+ },
63
+ "type": "dashboard"
64
+ }
65
+ ]
66
+ },
67
+ "description": "The datashboard for CLIP-as-service",
68
+ "editable": true,
69
+ "fiscalYearStartMonth": 0,
70
+ "graphTooltip": 0,
71
+ "id": null,
72
+ "iteration": 1654148217937,
73
+ "links": [],
74
+ "liveNow": false,
75
+ "panels": [
76
+ {
77
+ "datasource": {
78
+ "type": "prometheus",
79
+ "uid": "${DS_PROMETHEUS}"
80
+ },
81
+ "fieldConfig": {
82
+ "defaults": {
83
+ "color": {
84
+ "mode": "palette-classic"
85
+ },
86
+ "custom": {
87
+ "hideFrom": {
88
+ "legend": false,
89
+ "tooltip": false,
90
+ "viz": false
91
+ }
92
+ },
93
+ "mappings": [],
94
+ "unit": "s"
95
+ },
96
+ "overrides": [
97
+ {
98
+ "__systemRef": "hideSeriesFrom",
99
+ "matcher": {
100
+ "id": "byNames",
101
+ "options": {
102
+ "mode": "exclude",
103
+ "names": [
104
+ "gateway overhead",
105
+ "gateway/worker network",
106
+ "processing-",
107
+ "preproc text"
108
+ ],
109
+ "prefix": "All except:",
110
+ "readOnly": true
111
+ }
112
+ },
113
+ "properties": [
114
+ {
115
+ "id": "custom.hideFrom",
116
+ "value": {
117
+ "legend": false,
118
+ "tooltip": false,
119
+ "viz": true
120
+ }
121
+ }
122
+ ]
123
+ }
124
+ ]
125
+ },
126
+ "gridPos": {
127
+ "h": 16,
128
+ "w": 13,
129
+ "x": 0,
130
+ "y": 0
131
+ },
132
+ "id": 41,
133
+ "options": {
134
+ "displayLabels": [
135
+ "name"
136
+ ],
137
+ "legend": {
138
+ "displayMode": "table",
139
+ "placement": "right",
140
+ "values": [
141
+ "value",
142
+ "percent"
143
+ ]
144
+ },
145
+ "pieType": "pie",
146
+ "reduceOptions": {
147
+ "calcs": [
148
+ "lastNotNull"
149
+ ],
150
+ "fields": "",
151
+ "values": false
152
+ },
153
+ "tooltip": {
154
+ "mode": "single",
155
+ "sort": "none"
156
+ }
157
+ },
158
+ "pluginVersion": "8.4.4",
159
+ "targets": [
160
+ {
161
+ "datasource": {
162
+ "type": "prometheus",
163
+ "uid": "${DS_PROMETHEUS}"
164
+ },
165
+ "exemplar": true,
166
+ "expr": "jina_receiving_request_seconds_sum / jina_receiving_request_seconds_count",
167
+ "hide": false,
168
+ "interval": "",
169
+ "legendFormat": "receiving-{{job}}",
170
+ "refId": "A"
171
+ },
172
+ {
173
+ "datasource": {
174
+ "type": "prometheus",
175
+ "uid": "${DS_PROMETHEUS}"
176
+ },
177
+ "exemplar": true,
178
+ "expr": "jina_sending_request_seconds_sum / jina_sending_request_seconds_count",
179
+ "hide": false,
180
+ "interval": "",
181
+ "legendFormat": "sending-{{job}}",
182
+ "refId": "D"
183
+ },
184
+ {
185
+ "datasource": {
186
+ "type": "prometheus",
187
+ "uid": "${DS_PROMETHEUS}"
188
+ },
189
+ "exemplar": true,
190
+ "expr": "jina_preprocess_texts_seconds_sum / jina_preprocess_texts_seconds_count",
191
+ "hide": false,
192
+ "interval": "",
193
+ "legendFormat": "preproc text",
194
+ "refId": "B"
195
+ },
196
+ {
197
+ "datasource": {
198
+ "type": "prometheus",
199
+ "uid": "${DS_PROMETHEUS}"
200
+ },
201
+ "editorMode": "code",
202
+ "exemplar": true,
203
+ "expr": "jina_encode_texts_seconds_sum / jina_encode_texts_seconds_count",
204
+ "hide": false,
205
+ "interval": "",
206
+ "legendFormat": "encode text",
207
+ "range": true,
208
+ "refId": "C"
209
+ },
210
+ {
211
+ "datasource": {
212
+ "type": "prometheus",
213
+ "uid": "${DS_PROMETHEUS}"
214
+ },
215
+ "editorMode": "code",
216
+ "exemplar": true,
217
+ "expr": "jina_process_request_seconds_sum / jina_process_request_seconds_count",
218
+ "hide": false,
219
+ "interval": "",
220
+ "legendFormat": "processing-encode",
221
+ "range": true,
222
+ "refId": "E"
223
+ },
224
+ {
225
+ "datasource": {
226
+ "type": "prometheus",
227
+ "uid": "${DS_PROMETHEUS}"
228
+ },
229
+ "editorMode": "code",
230
+ "expr": "jina_preprocess_images_seconds_sum / jina_preprocess_images_seconds_count",
231
+ "hide": false,
232
+ "legendFormat": "preproc image",
233
+ "range": true,
234
+ "refId": "F"
235
+ },
236
+ {
237
+ "datasource": {
238
+ "type": "prometheus",
239
+ "uid": "${DS_PROMETHEUS}"
240
+ },
241
+ "editorMode": "code",
242
+ "expr": "jina_encode_images_seconds_sum / jina_encode_images_seconds_count",
243
+ "hide": false,
244
+ "legendFormat": "encode image",
245
+ "range": true,
246
+ "refId": "G"
247
+ }
248
+ ],
249
+ "title": "life cycle of a request",
250
+ "transformations": [
251
+ {
252
+ "id": "calculateField",
253
+ "options": {
254
+ "alias": "gateway overhead",
255
+ "binary": {
256
+ "left": "receiving-gateway",
257
+ "operator": "-",
258
+ "reducer": "sum",
259
+ "right": "sending-gateway"
260
+ },
261
+ "mode": "binary",
262
+ "reduce": {
263
+ "reducer": "sum"
264
+ }
265
+ }
266
+ },
267
+ {
268
+ "id": "calculateField",
269
+ "options": {
270
+ "alias": "worker-overhead",
271
+ "binary": {
272
+ "left": "receiving-exec",
273
+ "operator": "-",
274
+ "reducer": "sum",
275
+ "right": "processing-encode"
276
+ },
277
+ "mode": "binary",
278
+ "reduce": {
279
+ "reducer": "sum"
280
+ }
281
+ }
282
+ },
283
+ {
284
+ "id": "calculateField",
285
+ "options": {
286
+ "alias": "text-model-inference",
287
+ "binary": {
288
+ "left": "processing-encode",
289
+ "operator": "-",
290
+ "reducer": "sum",
291
+ "right": "preproc text"
292
+ },
293
+ "mode": "binary",
294
+ "reduce": {
295
+ "reducer": "sum"
296
+ }
297
+ }
298
+ },
299
+ {
300
+ "id": "calculateField",
301
+ "options": {
302
+ "alias": "gateway/worker network",
303
+ "binary": {
304
+ "left": "sending-gateway",
305
+ "operator": "-",
306
+ "reducer": "sum",
307
+ "right": "receiving-exec"
308
+ },
309
+ "mode": "binary",
310
+ "reduce": {
311
+ "reducer": "sum"
312
+ }
313
+ }
314
+ },
315
+ {
316
+ "id": "calculateField",
317
+ "options": {
318
+ "alias": "visual-model-inference",
319
+ "binary": {
320
+ "left": "processing-encode",
321
+ "reducer": "sum",
322
+ "right": "preproc image"
323
+ },
324
+ "mode": "binary",
325
+ "reduce": {
326
+ "reducer": "sum"
327
+ }
328
+ }
329
+ }
330
+ ],
331
+ "type": "piechart"
332
+ },
333
+ {
334
+ "datasource": {
335
+ "type": "prometheus",
336
+ "uid": "${DS_PROMETHEUS}"
337
+ },
338
+ "fieldConfig": {
339
+ "defaults": {
340
+ "color": {
341
+ "mode": "thresholds"
342
+ },
343
+ "mappings": [],
344
+ "thresholds": {
345
+ "mode": "absolute",
346
+ "steps": [
347
+ {
348
+ "color": "green",
349
+ "value": null
350
+ },
351
+ {
352
+ "color": "red",
353
+ "value": 80
354
+ }
355
+ ]
356
+ }
357
+ },
358
+ "overrides": []
359
+ },
360
+ "gridPos": {
361
+ "h": 8,
362
+ "w": 6,
363
+ "x": 15,
364
+ "y": 0
365
+ },
366
+ "id": 32,
367
+ "options": {
368
+ "colorMode": "value",
369
+ "graphMode": "area",
370
+ "justifyMode": "auto",
371
+ "orientation": "auto",
372
+ "reduceOptions": {
373
+ "calcs": [
374
+ "lastNotNull"
375
+ ],
376
+ "fields": "",
377
+ "values": false
378
+ },
379
+ "textMode": "auto"
380
+ },
381
+ "pluginVersion": "8.5.3",
382
+ "targets": [
383
+ {
384
+ "datasource": {
385
+ "type": "prometheus",
386
+ "uid": "${DS_PROMETHEUS}"
387
+ },
388
+ "exemplar": true,
389
+ "expr": "jina_receiving_request_seconds_count{runtime_name=~\"gateway.*\"}",
390
+ "instant": false,
391
+ "interval": "",
392
+ "intervalFactor": 1,
393
+ "legendFormat": "",
394
+ "refId": "A"
395
+ }
396
+ ],
397
+ "title": "Number of Request processed ",
398
+ "type": "stat"
399
+ },
400
+ {
401
+ "datasource": {
402
+ "type": "prometheus",
403
+ "uid": "${DS_PROMETHEUS}"
404
+ },
405
+ "fieldConfig": {
406
+ "defaults": {
407
+ "color": {
408
+ "mode": "palette-classic"
409
+ },
410
+ "custom": {
411
+ "axisLabel": "",
412
+ "axisPlacement": "auto",
413
+ "barAlignment": 0,
414
+ "drawStyle": "line",
415
+ "fillOpacity": 0,
416
+ "gradientMode": "none",
417
+ "hideFrom": {
418
+ "legend": false,
419
+ "tooltip": false,
420
+ "viz": false
421
+ },
422
+ "lineInterpolation": "linear",
423
+ "lineWidth": 1,
424
+ "pointSize": 5,
425
+ "scaleDistribution": {
426
+ "type": "linear"
427
+ },
428
+ "showPoints": "auto",
429
+ "spanNulls": false,
430
+ "stacking": {
431
+ "group": "A",
432
+ "mode": "none"
433
+ },
434
+ "thresholdsStyle": {
435
+ "mode": "off"
436
+ }
437
+ },
438
+ "mappings": [],
439
+ "thresholds": {
440
+ "mode": "absolute",
441
+ "steps": [
442
+ {
443
+ "color": "green",
444
+ "value": null
445
+ },
446
+ {
447
+ "color": "red",
448
+ "value": 80
449
+ }
450
+ ]
451
+ },
452
+ "unit": "s"
453
+ },
454
+ "overrides": []
455
+ },
456
+ "gridPos": {
457
+ "h": 8,
458
+ "w": 15,
459
+ "x": 0,
460
+ "y": 16
461
+ },
462
+ "id": 39,
463
+ "options": {
464
+ "legend": {
465
+ "calcs": [],
466
+ "displayMode": "list",
467
+ "placement": "bottom"
468
+ },
469
+ "tooltip": {
470
+ "mode": "single",
471
+ "sort": "none"
472
+ }
473
+ },
474
+ "targets": [
475
+ {
476
+ "datasource": {
477
+ "type": "prometheus",
478
+ "uid": "${DS_PROMETHEUS}"
479
+ },
480
+ "exemplar": true,
481
+ "expr": "jina_receiving_request_seconds_sum / jina_receiving_request_seconds_count",
482
+ "interval": "",
483
+ "legendFormat": "{{runtime_name}}",
484
+ "refId": "A"
485
+ }
486
+ ],
487
+ "title": "jina_receiving_request_seconds_sum",
488
+ "type": "timeseries"
489
+ },
490
+ {
491
+ "collapsed": false,
492
+ "datasource": {
493
+ "type": "prometheus",
494
+ "uid": "PBFA97CFB590B2093"
495
+ },
496
+ "gridPos": {
497
+ "h": 1,
498
+ "w": 24,
499
+ "x": 0,
500
+ "y": 24
501
+ },
502
+ "id": 4,
503
+ "panels": [],
504
+ "repeat": "Executor",
505
+ "title": "$Executor",
506
+ "type": "row"
507
+ },
508
+ {
509
+ "datasource": {
510
+ "type": "prometheus",
511
+ "uid": "${DS_PROMETHEUS}"
512
+ },
513
+ "fieldConfig": {
514
+ "defaults": {
515
+ "color": {
516
+ "mode": "thresholds"
517
+ },
518
+ "mappings": [],
519
+ "thresholds": {
520
+ "mode": "absolute",
521
+ "steps": [
522
+ {
523
+ "color": "green"
524
+ }
525
+ ]
526
+ }
527
+ },
528
+ "overrides": []
529
+ },
530
+ "gridPos": {
531
+ "h": 5,
532
+ "w": 8,
533
+ "x": 0,
534
+ "y": 25
535
+ },
536
+ "id": 2,
537
+ "options": {
538
+ "colorMode": "value",
539
+ "graphMode": "area",
540
+ "justifyMode": "auto",
541
+ "orientation": "auto",
542
+ "reduceOptions": {
543
+ "calcs": [
544
+ "lastNotNull"
545
+ ],
546
+ "fields": "",
547
+ "values": false
548
+ },
549
+ "textMode": "auto"
550
+ },
551
+ "pluginVersion": "8.5.3",
552
+ "targets": [
553
+ {
554
+ "datasource": {
555
+ "type": "prometheus",
556
+ "uid": "${DS_PROMETHEUS}"
557
+ },
558
+ "exemplar": true,
559
+ "expr": "jina_document_processed_total{runtime_name=\"$Executor\"}",
560
+ "instant": false,
561
+ "interval": "",
562
+ "intervalFactor": 1,
563
+ "legendFormat": "{{executor_endpoint}}",
564
+ "refId": "A"
565
+ }
566
+ ],
567
+ "title": "Number of Documents processed per endpoint",
568
+ "type": "stat"
569
+ },
570
+ {
571
+ "datasource": {
572
+ "type": "prometheus",
573
+ "uid": "${DS_PROMETHEUS}"
574
+ },
575
+ "fieldConfig": {
576
+ "defaults": {
577
+ "color": {
578
+ "mode": "thresholds"
579
+ },
580
+ "mappings": [],
581
+ "thresholds": {
582
+ "mode": "absolute",
583
+ "steps": [
584
+ {
585
+ "color": "green"
586
+ },
587
+ {
588
+ "color": "red",
589
+ "value": 80
590
+ }
591
+ ]
592
+ }
593
+ },
594
+ "overrides": []
595
+ },
596
+ "gridPos": {
597
+ "h": 5,
598
+ "w": 8,
599
+ "x": 8,
600
+ "y": 25
601
+ },
602
+ "id": 7,
603
+ "options": {
604
+ "colorMode": "value",
605
+ "graphMode": "area",
606
+ "justifyMode": "auto",
607
+ "orientation": "auto",
608
+ "reduceOptions": {
609
+ "calcs": [
610
+ "lastNotNull"
611
+ ],
612
+ "fields": "",
613
+ "values": false
614
+ },
615
+ "textMode": "auto"
616
+ },
617
+ "pluginVersion": "8.5.3",
618
+ "targets": [
619
+ {
620
+ "datasource": {
621
+ "type": "prometheus",
622
+ "uid": "${DS_PROMETHEUS}"
623
+ },
624
+ "exemplar": true,
625
+ "expr": "jina_process_request_seconds_count{runtime_name=\"$Executor\"}",
626
+ "instant": false,
627
+ "interval": "",
628
+ "intervalFactor": 1,
629
+ "legendFormat": "{{executor_endpoint}}",
630
+ "refId": "A"
631
+ }
632
+ ],
633
+ "title": "Number of requests per endpoint",
634
+ "type": "stat"
635
+ },
636
+ {
637
+ "datasource": {
638
+ "type": "prometheus",
639
+ "uid": "${DS_PROMETHEUS}"
640
+ },
641
+ "fieldConfig": {
642
+ "defaults": {
643
+ "color": {
644
+ "mode": "palette-classic"
645
+ },
646
+ "custom": {
647
+ "axisLabel": "",
648
+ "axisPlacement": "auto",
649
+ "barAlignment": 0,
650
+ "drawStyle": "line",
651
+ "fillOpacity": 0,
652
+ "gradientMode": "none",
653
+ "hideFrom": {
654
+ "legend": false,
655
+ "tooltip": false,
656
+ "viz": false
657
+ },
658
+ "lineInterpolation": "linear",
659
+ "lineWidth": 1,
660
+ "pointSize": 5,
661
+ "scaleDistribution": {
662
+ "type": "linear"
663
+ },
664
+ "showPoints": "auto",
665
+ "spanNulls": false,
666
+ "stacking": {
667
+ "group": "A",
668
+ "mode": "none"
669
+ },
670
+ "thresholdsStyle": {
671
+ "mode": "off"
672
+ }
673
+ },
674
+ "mappings": [],
675
+ "thresholds": {
676
+ "mode": "absolute",
677
+ "steps": [
678
+ {
679
+ "color": "green"
680
+ },
681
+ {
682
+ "color": "red",
683
+ "value": 80
684
+ }
685
+ ]
686
+ },
687
+ "unit": "s"
688
+ },
689
+ "overrides": []
690
+ },
691
+ "gridPos": {
692
+ "h": 6,
693
+ "w": 18,
694
+ "x": 0,
695
+ "y": 30
696
+ },
697
+ "id": 12,
698
+ "options": {
699
+ "legend": {
700
+ "calcs": [],
701
+ "displayMode": "list",
702
+ "placement": "bottom"
703
+ },
704
+ "tooltip": {
705
+ "mode": "single",
706
+ "sort": "none"
707
+ }
708
+ },
709
+ "targets": [
710
+ {
711
+ "datasource": {
712
+ "type": "prometheus",
713
+ "uid": "${DS_PROMETHEUS}"
714
+ },
715
+ "exemplar": true,
716
+ "expr": "jina_process_request_seconds_sum{runtime_name=\"$Executor\"} / jina_process_request_seconds_count{runtime_name=\"$Executor\"}",
717
+ "interval": "",
718
+ "legendFormat": "{{executor_endpoint}}-process",
719
+ "refId": "A"
720
+ }
721
+ ],
722
+ "title": "Time spend calling the Executor method link the to endpoint",
723
+ "type": "timeseries"
724
+ },
725
+ {
726
+ "datasource": {
727
+ "type": "prometheus",
728
+ "uid": "${DS_PROMETHEUS}"
729
+ },
730
+ "fieldConfig": {
731
+ "defaults": {
732
+ "color": {
733
+ "mode": "palette-classic"
734
+ },
735
+ "custom": {
736
+ "axisLabel": "",
737
+ "axisPlacement": "auto",
738
+ "barAlignment": 0,
739
+ "drawStyle": "line",
740
+ "fillOpacity": 0,
741
+ "gradientMode": "none",
742
+ "hideFrom": {
743
+ "legend": false,
744
+ "tooltip": false,
745
+ "viz": false
746
+ },
747
+ "lineInterpolation": "linear",
748
+ "lineWidth": 1,
749
+ "pointSize": 5,
750
+ "scaleDistribution": {
751
+ "type": "linear"
752
+ },
753
+ "showPoints": "auto",
754
+ "spanNulls": false,
755
+ "stacking": {
756
+ "group": "A",
757
+ "mode": "none"
758
+ },
759
+ "thresholdsStyle": {
760
+ "mode": "off"
761
+ }
762
+ },
763
+ "mappings": [],
764
+ "thresholds": {
765
+ "mode": "absolute",
766
+ "steps": [
767
+ {
768
+ "color": "green"
769
+ },
770
+ {
771
+ "color": "red",
772
+ "value": 80
773
+ }
774
+ ]
775
+ },
776
+ "unit": "s"
777
+ },
778
+ "overrides": []
779
+ },
780
+ "gridPos": {
781
+ "h": 6,
782
+ "w": 18,
783
+ "x": 0,
784
+ "y": 36
785
+ },
786
+ "id": 17,
787
+ "options": {
788
+ "legend": {
789
+ "calcs": [],
790
+ "displayMode": "list",
791
+ "placement": "bottom"
792
+ },
793
+ "tooltip": {
794
+ "mode": "single",
795
+ "sort": "none"
796
+ }
797
+ },
798
+ "targets": [
799
+ {
800
+ "datasource": {
801
+ "type": "prometheus",
802
+ "uid": "${DS_PROMETHEUS}"
803
+ },
804
+ "exemplar": true,
805
+ "expr": "jina_receiving_request_seconds_sum{runtime_name=\"$Executor\"} / jina_receiving_request_seconds_count{runtime_name=\"$Executor\"}",
806
+ "interval": "",
807
+ "legendFormat": "{{executor_endpoint}}",
808
+ "refId": "A"
809
+ }
810
+ ],
811
+ "title": "Time spend calling between receiving and responding ",
812
+ "type": "timeseries"
813
+ }
814
+ ],
815
+ "refresh": "",
816
+ "schemaVersion": 36,
817
+ "style": "dark",
818
+ "tags": [
819
+ "clip",
820
+ "jina"
821
+ ],
822
+ "templating": {
823
+ "list": [
824
+ {
825
+ "current": {},
826
+ "datasource": {
827
+ "type": "prometheus",
828
+ "uid": "${DS_PROMETHEUS}"
829
+ },
830
+ "definition": "label_values(jina_document_processed_created,executor_endpoint)\n",
831
+ "description": "",
832
+ "hide": 0,
833
+ "includeAll": true,
834
+ "multi": true,
835
+ "name": "Endpoint",
836
+ "options": [],
837
+ "query": {
838
+ "query": "label_values(jina_document_processed_created,executor_endpoint)\n",
839
+ "refId": "StandardVariableQuery"
840
+ },
841
+ "refresh": 1,
842
+ "regex": "",
843
+ "skipUrlSync": false,
844
+ "sort": 0,
845
+ "type": "query"
846
+ },
847
+ {
848
+ "current": {},
849
+ "datasource": {
850
+ "type": "prometheus",
851
+ "uid": "${DS_PROMETHEUS}"
852
+ },
853
+ "definition": "label_values(jina_document_processed_created,runtime_name)\n",
854
+ "description": "",
855
+ "hide": 0,
856
+ "includeAll": true,
857
+ "multi": true,
858
+ "name": "Executor",
859
+ "options": [],
860
+ "query": {
861
+ "query": "label_values(jina_document_processed_created,runtime_name)\n",
862
+ "refId": "StandardVariableQuery"
863
+ },
864
+ "refresh": 1,
865
+ "regex": "",
866
+ "skipUrlSync": false,
867
+ "sort": 0,
868
+ "type": "query"
869
+ },
870
+ {
871
+ "current": {
872
+ "selected": false,
873
+ "text": "Prometheus",
874
+ "value": "Prometheus"
875
+ },
876
+ "hide": 0,
877
+ "includeAll": false,
878
+ "multi": false,
879
+ "name": "datasource",
880
+ "options": [],
881
+ "query": "prometheus",
882
+ "queryValue": "",
883
+ "refresh": 1,
884
+ "regex": "",
885
+ "skipUrlSync": false,
886
+ "type": "datasource"
887
+ }
888
+ ]
889
+ },
890
+ "time": {
891
+ "from": "now-5m",
892
+ "to": "now"
893
+ },
894
+ "timepicker": {},
895
+ "timezone": "",
896
+ "title": "clip-as-service",
897
+ "uid": "e_4RtOlnz",
898
+ "version": 3,
899
+ "weekStart": ""
900
+ }
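Every latency panel in this dashboard derives a mean by dividing a metric's `_sum` by its `_count`. A hedged Python sketch of the same computation against the standard Prometheus HTTP API; the Prometheus address is an assumption:

```python
# Sketch only: assumes a Prometheus instance scraping the Flow at localhost:9090.
import requests

query = 'jina_receiving_request_seconds_sum / jina_receiving_request_seconds_count'
resp = requests.get('http://localhost:9090/api/v1/query', params={'query': query})
# Each result is one time series: mean seconds per request, keyed by runtime.
for series in resp.json()['data']['result']:
    print(series['metric'].get('runtime_name'), series['value'][1])
```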
clip_as_service/docs/_static/cas-light.svg ADDED
clip_as_service/docs/_static/colab-banner.png ADDED

Git LFS Details

  • SHA256: e7701cf44db53431dab93c82cf138b632c3fea613456a99fbcb5a74297d250b3
  • Pointer size: 131 Bytes
  • Size of remote file: 419 kB
clip_as_service/docs/_static/demo-embed.html ADDED
@@ -0,0 +1,213 @@
1
+ <script src="https://cdn.jsdelivr.net/npm/vue@2/dist/vue.js"></script>
2
+ <script src='https://cdnjs.cloudflare.com/ajax/libs/jquery/2.1.3/jquery.min.js'></script>
3
+
4
+ <div id="demo-embed">
5
+
6
+
7
+ <table class="embeddingInput">
8
+
9
+ <tr>
10
+ <td>Input a sentence (or an image URL)</td>
11
+ <td><textarea v-model="query"
12
+ placeholder="Input a sentence or an image URL"
13
+ style="width: 80%"
14
+ rows="6"
15
+ maxlength="1000">
16
+ </textarea></td>
17
+ </tr>
18
+
19
+ <tr>
20
+ <td>Click an image to select</td>
21
+ <td>
22
+ <div class="gallery">
23
+ <img @click="query='https://picsum.photos/id/'+image.id+'/50'" class="gallery-image"
24
+ :src="'https://picsum.photos/id/'+image.id+'/80'" v-for="image in images">
25
+ </div>
26
+ </td>
27
+ </tr>
28
+
29
+ <tr>
30
+ <td>Upload image from local</td>
31
+ <td><input type="file" @change="encodeImageFileAsURL" accept=".jpg, .jpeg, .png"/></td>
32
+ </tr>
33
+
34
+ </table>
35
+
36
+ <div v-if="query && embedding" class="embeddingChart">
37
+
38
+ <div>
+ <p>Done in {{elapsed}}ms
+ <span>
+ <img class="thumbnail" v-if="isUrl" :src="query" :alt="query + ' is not a valid image'">
+ <code v-else>{{query}}</code>
+ </span>
+ </p>
+ </div>
45
+
46
+ <div class="embeddingBlock" v-for="value in embedding" v-bind:style="{opacity: normalize_value(value)}"
47
+ v-bind:title="value">
48
+ <span v-if="showValue">{{value.toString().charAt(3)}}</span>
49
+ </div>
50
+
51
+ <div>
52
+
53
+ <input type="checkbox" id="checkbox" v-model="showValue"/>
54
+ <label for="checkbox">Show embedding values (that visually make no sense but people like it as it was doing
55
+ some real science stuff)</label>
56
+ </div>
57
+
58
+ </div>
59
+
60
+
61
+ </div>
62
+
63
+ <style>
64
+ #demo-embed {
65
+ font-family: var(--font-stack) !important;
66
+ }
67
+
68
+ .gallery-image:hover {
69
+ opacity: 100%;
70
+ }
71
+
72
+ .gallery-image {
73
+ opacity: 50%;
74
+ transition: opacity 0.3s;
75
+ -webkit-transition: opacity 0.3s;
76
+ cursor: pointer;
77
+ }
78
+
79
+ .thumbnail {
80
+ max-width: 64px;
81
+ max-height: 64px;
82
+ }
83
+
84
+ .embeddingChart {
85
+ margin-top: 30px;
86
+ margin-bottom: 30px;
87
+ }
88
+
89
+ .embeddingBlock {
90
+ width: 8px;
91
+ height: 8px;
92
+ display: inline-flex;
93
+ background: green;
94
+ border-style: solid;
95
+ border-color: white;
96
+ border-width: 1px;
97
+ font-size: 1vmin;
98
+ color: white;
99
+ text-align: center;
100
+ vertical-align: middle;
101
+ justify-content: center;
102
+ align-items: center;
103
+ transition: opacity 0.3s;
104
+ -webkit-transition: opacity 0.3s;
105
+ cursor: pointer;
106
+ }
107
+
108
+ .embeddingBlock:hover {
109
+ border-color: green;
110
+ }
111
+
112
+
113
+ </style>
114
+
115
+ <script>
116
+ function randomIntFromInterval(min, max) { // min and max included
117
+ return Math.floor(Math.random() * (max - min + 1) + min)
118
+ }
119
+
120
+ var app = new Vue({
121
+ el: '#demo-embed',
122
+ data: {
123
+ serverAddress: `https://api.clip.jina.ai:8443`,
124
+ query: 'First do it, then do it right, then do it better',
125
+ embedding: [1, 1, 1],
126
+ max_embed_value: 0,
127
+ min_embed_value: 0,
128
+ elapsed: 0,
129
+ showValue: true,
130
+ images: []
131
+ },
132
+ computed: {
133
+ isUrl: function () {
134
+ let url;
135
+
136
+ try {
137
+ url = new URL(this.query);
138
+ } catch (_) {
139
+ return false;
140
+ }
141
+
142
+ return url.protocol === "http:" || url.protocol === "https:" || url.protocol === "data:";
143
+ },
144
+ // get only
145
+ payload: function () {
146
+ return {
147
+ data: [this.isUrl ? {
148
+ uri: this.query
149
+ } : {
150
+ text: this.query
151
+ }],
152
+ exec_endpoint: '/',
153
+ }
154
+ }
155
+ },
156
+ mounted: function () {
157
+ this.$nextTick(function () {
158
+ app.callJina();
159
+
160
+ $.getJSON("https://picsum.photos/v2/list?page=" + randomIntFromInterval(1, 40) + "&limit=10", function (json) {
161
+ app.images = json
162
+ });
163
+
164
+ })
165
+ },
166
+ watch: {
167
+ query: function (newQ, oldQ) {
168
+ this.callJina()
169
+ }
170
+ },
171
+ methods: {
172
+ encodeImageFileAsURL(element) {
173
+ var file = element.target.files[0];
174
+ var reader = new FileReader();
175
+ reader.onloadend = function () {
176
+ app.query = reader.result
177
+ }
178
+ reader.readAsDataURL(file);
179
+ },
180
+ normalize_value(val) {
181
+ r = (val - this.min_embed_value) / (this.max_embed_value - this.min_embed_value)
182
+ r = (r * 10).toFixed(0) / 10
183
+ return r
184
+ },
185
+ callJina: function () {
186
+
187
+ $.ajax({
188
+ headers: {
189
+ Authorization: "d28b93ccbd13367148d05fe3f7fbc680"
190
+ },
191
+ type: "POST",
192
+ url: this.serverAddress + "/post",
193
+ data: JSON.stringify(this.payload),
194
+ contentType: "application/json; charset=utf-8",
195
+ dataType: "json",
196
+ }).success(function (data, textStatus, jqXHR) {
197
+ // data.data[0].embedding
198
+ app.embedding = data.data[0].embedding
199
+ app.max_embed_value = Math.max.apply(null, app.embedding)
200
+ app.min_embed_value = Math.min.apply(null, app.embedding)
201
+
202
+ date1 = new Date(data.routes[0].startTime)
203
+ date2 = new Date(data.routes[0].endTime)
204
+ app.elapsed = date2 - date1
205
+
206
+ }).fail(function () {
207
+ console.error("bad connection!")
208
+ });
209
+ }
210
+ }
211
+ })
212
+
213
+ </script>
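The AJAX call in this demo reduces to a single HTTP POST. The same request in Python for reference; the endpoint and token are taken verbatim from the page above and may no longer be live, so treat both as placeholders for your own deployment:

```python
# Mirrors the demo page's request; endpoint and token come from the demo itself.
import requests

payload = {
    'data': [{'text': 'First do it, then do it right, then do it better'}],
    'exec_endpoint': '/',
}
resp = requests.post(
    'https://api.clip.jina.ai:8443/post',
    json=payload,
    headers={'Authorization': 'd28b93ccbd13367148d05fe3f7fbc680'},
)
print(len(resp.json()['data'][0]['embedding']))  # embedding dimensionality
```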
clip_as_service/docs/_static/demo-text-rank.html ADDED
@@ -0,0 +1,245 @@
1
+ <script src="https://cdn.jsdelivr.net/npm/vue@2/dist/vue.js"></script>
2
+ <script src='https://cdnjs.cloudflare.com/ajax/libs/jquery/2.1.3/jquery.min.js'></script>
3
+
4
+ <div id="demo-embed">
5
+ <table class="embeddingInput">
6
+ <tr>
7
+ <td>Input an image URL</td>
8
+ <td><input v-model="query"
9
+ placeholder="Input an image URL"
10
+ style="width: 80%"
11
+ maxlength="1000"></td>
12
+ </tr>
13
+
14
+ <tr>
15
+ <td>Click an image to select</td>
16
+ <td>
17
+ <div class="gallery">
18
+ <img @click="query='https://picsum.photos/id/'+image.id+'/50'" class="gallery-image"
19
+ :src="'https://picsum.photos/id/'+image.id+'/80'" v-for="image in images">
20
+ </div>
21
+ </td>
22
+ </tr>
23
+
24
+ <tr>
25
+ <td>Upload image from local</td>
26
+ <td><input type="file" @change="encodeImageFileAsURL" accept=".jpg, .jpeg, .png"/></td>
27
+ </tr>
28
+
29
+ </table>
30
+
31
+ <p>
32
+ <button v-on:click="addPrompt">+ Add prompt</button>
33
+ <button v-on:click="rmPrompt">- Remove prompt</button>
34
+ </p>
35
+ <li v-for="item in prompts" :key="item.id">
36
+ <input v-model="item.text" placeholder="edit me" size="50">
37
+ </li>
38
+
39
+ <p>
40
+ <div v-if="query && embedding" class="embeddingChart">
41
+
42
+ <div>
43
+ <p>Done in {{elapsed}}ms
44
+ <span>
45
+ <img class="thumbnail" v-if="isUrl" :src="query" :alt="query + 'is not a valid image'">
46
+ <code v-else>{{query}}</code>
47
+ </span>
48
+ </p>
49
+ </div>
50
+
51
+ </div>
52
+ </p>
53
+ <p>
54
+ Showing reasoning results (softmax scores):
55
+ <table>
56
+ <tr v-for="item in matches">
57
+ <td style="width: 300px"
58
+ :style="{background: 'linear-gradient(90deg, #00bfa5 ' +(item.scores['clip_score'].value * 100).toFixed(2) + '%, #b5f6e9 '+ (item.scores['clip_score'].value * 100).toFixed(2) + '%)'}">
59
+ {{item.text}}
60
+ </td>
61
+ <td>{{(item.scores['clip_score'].value * 100).toFixed(2)}}</td>
62
+ </tr>
63
+ </table>
64
+ </p>
65
+ </div>
66
+
67
+ <style>
68
+ #demo-embed {
69
+ font-family: var(--font-stack) !important;
70
+ }
71
+
72
+ .scorebar {
73
+ background: #00bfa5;
74
+ }
75
+
76
+ .gallery-image:hover {
77
+ opacity: 100%;
78
+ }
79
+
80
+ .gallery-image {
81
+ opacity: 50%;
82
+ transition: opacity 0.3s;
83
+ -webkit-transition: opacity 0.3s;
84
+ cursor: pointer;
85
+ }
86
+
87
+ .preview-image {
88
+ width: 100px
89
+ }
90
+
91
+ .thumbnail {
92
+ max-width: 64px;
93
+ max-height: 64px;
94
+ }
95
+
96
+ .embeddingChart {
97
+ margin-top: 30px;
98
+ margin-bottom: 30px;
99
+ }
100
+
101
+ .embeddingBlock {
102
+ width: 8px;
103
+ height: 8px;
104
+ display: inline-flex;
105
+ background: green;
106
+ border-style: solid;
107
+ border-color: white;
108
+ border-width: 1px;
109
+ font-size: 1vmin;
110
+ color: white;
111
+ text-align: center;
112
+ vertical-align: middle;
113
+ justify-content: center;
114
+ align-items: center;
115
+ transition: opacity 0.3s;
116
+ -webkit-transition: opacity 0.3s;
117
+ cursor: pointer;
118
+ }
119
+
120
+ .embeddingBlock:hover {
121
+ border-color: green;
122
+ }
123
+
124
+
125
+ </style>
126
+
127
+ <script>
128
+ function randomIntFromInterval(min, max) { // min and max included
129
+ return Math.floor(Math.random() * (max - min + 1) + min)
130
+ }
131
+
132
+ var app = new Vue({
133
+ el: '#demo-embed',
134
+ data: {
135
+ serverAddress: `https://api.clip.jina.ai:8443`,
136
+ query: 'https://picsum.photos/300',
137
+ embedding: [1, 1, 1],
138
+ max_embed_value: 0,
139
+ min_embed_value: 0,
140
+ elapsed: 0,
141
+ showValue: true,
142
+ images: [],
143
+ matches: [],
144
+ prompts: [
145
+ {"text": "This is a photo of natural scene"},
146
+ {"text": "This is a photo of man-made object"},
147
+ {"text": "This is a photo of an animal"},
148
+ {"text": "This is a photo with human faces"},
149
+ {"text": "This is a blurry photo"},
150
+ {"text": "This is a black and white photo"},
151
+ {"text": "This is a screenshot"},
152
+ ]
153
+ },
154
+ computed: {
155
+ isUrl: function () {
156
+ let url;
157
+
158
+ try {
159
+ url = new URL(this.query);
160
+ } catch (_) {
161
+ return false;
162
+ }
163
+
164
+ return url.protocol === "http:" || url.protocol === "https:" || url.protocol === "data:";
165
+ },
166
+ // get only
167
+ payload: function () {
168
+ return {
169
+ data: [this.isUrl ? {
170
+ uri: this.query,
171
+ matches: this.prompts
172
+ } : {
173
+ text: this.query,
174
+ matches: this.prompts
175
+ }],
176
+ exec_endpoint: '/rank',
177
+ }
178
+ }
179
+ },
180
+ mounted: function () {
181
+ this.$nextTick(function () {
182
+ app.callJina();
183
+
184
+ $.getJSON("https://picsum.photos/v2/list?page=" + randomIntFromInterval(1, 40) + "&limit=10", function (json) {
185
+ app.images = json
186
+ });
187
+
188
+ })
189
+ },
190
+ watch: {
191
+ query: function (newQ, oldQ) {
192
+ this.callJina()
193
+ },
194
+ prompts: {
195
+ handler: function (newQ, oldQ) {
196
+ this.callJina()
197
+ }, deep: true
198
+ },
199
+ },
200
+ methods: {
201
+ encodeImageFileAsURL(element) {
202
+ var file = element.target.files[0];
203
+ var reader = new FileReader();
204
+ reader.onloadend = function () {
205
+ app.query = reader.result
206
+ }
207
+ reader.readAsDataURL(file);
208
+ },
209
+ addPrompt: function () {
210
+ this.prompts.push({"text": "write your prompt here"})
211
+ },
212
+ rmPrompt: function () {
213
+ this.prompts.pop()
214
+ },
215
+ normalize_value(val) {
216
+ r = (val - this.min_embed_value) / (this.max_embed_value - this.min_embed_value)
217
+ r = (r * 10).toFixed(0) / 10
218
+ return r
219
+ },
220
+ callJina: function () {
221
+
222
+ $.ajax({
223
+ headers: {
224
+ Authorization: "d28b93ccbd13367148d05fe3f7fbc680"
225
+ },
226
+ type: "POST",
227
+ url: this.serverAddress + "/post",
228
+ data: JSON.stringify(this.payload),
229
+ contentType: "application/json; charset=utf-8",
230
+ dataType: "json",
231
+ }).success(function (data, textStatus, jqXHR) {
232
+ // data.data[0].embedding
233
+ app.matches = data.data[0].matches
234
+ date1 = new Date(data.routes[0].startTime)
235
+ date2 = new Date(data.routes[0].endTime)
236
+ app.elapsed = date2 - date1
237
+
238
+ }).fail(function () {
239
+ console.error("bad connection!")
240
+ });
241
+ }
242
+ }
243
+ })
244
+
245
+ </script>
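The rank demo differs from the embed demo only in its payload: the candidate prompts travel as `matches` and the request targets `/rank`. The equivalent Python call; again, the server address and token are the demo's own and may have rotated:

```python
# Sketch of the /rank request issued by the page above; values are placeholders.
import requests

payload = {
    'data': [{
        'uri': 'https://picsum.photos/300',
        'matches': [
            {'text': 'This is a photo of natural scene'},
            {'text': 'This is a screenshot'},
        ],
    }],
    'exec_endpoint': '/rank',
}
resp = requests.post(
    'https://api.clip.jina.ai:8443/post',
    json=payload,
    headers={'Authorization': 'd28b93ccbd13367148d05fe3f7fbc680'},
)
for m in resp.json()['data'][0]['matches']:
    print(f"{m['scores']['clip_score']['value']:.4f}", m['text'])
```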
clip_as_service/docs/_static/docarray-dark.svg ADDED
clip_as_service/docs/_static/docarray-light.svg ADDED
clip_as_service/docs/_static/favicon.png ADDED

Git LFS Details

  • SHA256: 317ab14e89cd3ed17ba3fddcd70af471f3c7fcc2b1440e8856caef58cf3003c2
  • Pointer size: 130 Bytes
  • Size of remote file: 42.6 kB
clip_as_service/docs/_static/finetuner-dark.svg ADDED
clip_as_service/docs/_static/finetuner-light.svg ADDED
clip_as_service/docs/_static/hub-dark.svg ADDED
clip_as_service/docs/_static/hub-light.svg ADDED
clip_as_service/docs/_static/logo-dark.svg ADDED
clip_as_service/docs/_static/logo-light.svg ADDED
clip_as_service/docs/_static/main.css ADDED
@@ -0,0 +1,175 @@
1
+ html.loaded-in-iframe #announcement,
2
+ html.loaded-in-iframe #sidebar-drawer,
3
+ html.loaded-in-iframe footer,
4
+ html.loaded-in-iframe #toc-drawer {
5
+ display: none!important;
6
+ }
7
+
8
+ html.loaded-in-iframe .page .main {
9
+ justify-content: center;
10
+ }
11
+
12
+ .sidebar-logo {
13
+ max-width: 70%;
14
+ }
15
+
16
+
17
+ table.docutils {
18
+ border: thin;
19
+ }
20
+
21
+ table.docutils td, table.docutils th {
22
+ padding: 1rem 1rem;
23
+ }
24
+
25
+ .highlight {
26
+ background: #f5f5f5;
27
+ }
28
+
29
+ h1, h2, h3 {
30
+ margin-top: 3rem;
31
+ }
32
+
33
+ .highlight-console .highlight {
34
+ background: #00232b !important;
35
+ color: whitesmoke;
36
+ }
37
+
38
+ .highlight-text .highlight {
39
+ background: #00232b !important;
40
+ color: whitesmoke;
41
+ }
42
+
43
+ .highlight-json .highlight {
44
+ background: #00232b !important;
45
+ color: whitesmoke;
46
+ }
47
+
48
+ .highlight-shell .highlight {
49
+ background: #00232b !important;
50
+ color: whitesmoke;
51
+ }
52
+
53
+ .highlight-bash .highlight {
54
+ background: #00232b !important;
55
+ color: whitesmoke;
56
+ }
57
+
58
+ .tab-set > input:checked + label {
59
+ border-color: var(--tabs--label-text--active);
60
+ }
61
+
62
+ .tab-set > input:checked + label:hover {
63
+ border-color: var(--tabs--label-text--active);
64
+ }
65
+
66
+
67
+ table code {
68
+ background: var(--color-inline-code-background);
69
+ border: 1px solid var(--color-background-border);
70
+ border-radius: .2em;
71
+ font-size: var(--font-size--small--2);
72
+ padding: .1em .2em;
73
+ }
74
+
75
+ .related-information {
76
+ justify-content: space-between;
77
+ }
78
+
79
+ .social-btn {
80
+ margin: 0 .3em;
81
+ }
82
+
83
+ .social-btn:hover {
84
+ opacity: .5;
85
+ }
86
+
87
+ .social-btns {
88
+ display: inline-block;
89
+ }
90
+
91
+ .announcement {
92
+ background-color: var(--color-brand-primary);
93
+ color: var(--color-background-primary) !important;
94
+ }
95
+
96
+ .announcement a {
97
+ color: inherit;
98
+ text-decoration: none;
99
+ }
100
+
101
+ .announcement a:hover {
102
+ color: inherit;
103
+ text-decoration: underline;
104
+ }
105
+
106
+ .sidebar-ecosys-logo {
107
+ width: 1.2em;
108
+ margin-right: .5em;
109
+ vertical-align: middle
110
+ }
111
+
112
+
113
+ body[data-theme="dark"] .only-dark-line {
114
+ display: inline-block !important;
115
+ }
116
+
117
+ body[data-theme="dark"] .only-light-line {
118
+ display: none !important;
119
+ }
120
+
121
+ body[data-theme="light"] .only-light-line {
122
+ display: inline-block !important;
123
+ }
124
+
125
+ body[data-theme="light"] .only-dark-line {
126
+ display: none !important;
127
+ }
128
+
129
+ body[data-theme="auto"] .only-light-line {
130
+ display: inline-block !important;
131
+ }
132
+
133
+ body[data-theme="auto"] .only-dark-line {
134
+ display: none !important;
135
+ }
136
+
137
+ .color-gradient-card {
138
+ background: linear-gradient(270deg, #22c1c3, #fdbb2d);
139
+ background-size: 200% 200%;
140
+
141
+ -webkit-animation: AnimationName 30s ease infinite;
142
+ -moz-animation: AnimationName 30s ease infinite;
143
+ animation: AnimationName 30s ease infinite;
144
+ }
145
+
146
+ @-webkit-keyframes AnimationName {
147
+ 0%{background-position:0% 50%}
148
+ 50%{background-position:100% 50%}
149
+ 100%{background-position:0% 50%}
150
+ }
151
+ @-moz-keyframes AnimationName {
152
+ 0%{background-position:0% 50%}
153
+ 50%{background-position:100% 50%}
154
+ 100%{background-position:0% 50%}
155
+ }
156
+ @keyframes AnimationName {
157
+ 0%{background-position:0% 50%}
158
+ 50%{background-position:100% 50%}
159
+ 100%{background-position:0% 50%}
160
+ }
161
+
162
+ .version-select {
163
+ font-size: .7em;
164
+ border-radius: 5px;
165
+ cursor: pointer;
166
+ background-color: #fff;
167
+ background-image: linear-gradient(to top, #f9f9f9, #fff 33%);
168
+ border-color: var(--color-background-border);
169
+ height: 1.8em;
170
+ line-height: 1.8em;
171
+ outline: none;
172
+ text-align: center;
173
+ max-width: 7em;
174
+ color: var(--color-foreground-muted);
175
+ }
clip_as_service/docs/_static/now-dark.svg ADDED
clip_as_service/docs/_static/now-light.svg ADDED
clip_as_service/docs/_static/search-dark.svg ADDED
clip_as_service/docs/_static/search-light.svg ADDED
clip_as_service/docs/_templates/page.html ADDED
@@ -0,0 +1,230 @@
1
+ {% extends "base.html" %}
2
+
3
+ {% block body -%}
4
+ {{ super() }}
5
+ {% include "partials/icons.html" %}
6
+
7
+ <input type="checkbox" class="sidebar-toggle" name="__navigation" id="__navigation">
8
+ <input type="checkbox" class="sidebar-toggle" name="__toc" id="__toc">
9
+ <label class="overlay sidebar-overlay" for="__navigation">
10
+ <div class="visually-hidden">Hide navigation sidebar</div>
11
+ </label>
12
+ <label class="overlay toc-overlay" for="__toc">
13
+ <div class="visually-hidden">Hide table of contents sidebar</div>
14
+ </label>
15
+
16
+ {% if theme_announcement -%}
17
+ <div class="announcement">
18
+ <aside class="announcement-content">
19
+ {% block announcement %} {{ theme_announcement }} {% endblock announcement %}
20
+ </aside>
21
+ </div>
22
+ {%- endif %}
23
+
24
+ <div class="page">
25
+ <header class="mobile-header">
26
+ <div class="header-left">
27
+ <label class="nav-overlay-icon" for="__navigation">
28
+ <div class="visually-hidden">Toggle site navigation sidebar</div>
29
+ <i class="icon">
30
+ <svg>
31
+ <use href="#svg-menu"></use>
32
+ </svg>
33
+ </i>
34
+ </label>
35
+ </div>
36
+ <div class="header-center">
37
+ <a href="{{ pathto(master_doc) }}">
38
+ <div class="brand">{{ docstitle if docstitle else project }}</div>
39
+ </a>
40
+ </div>
41
+ <div class="header-right">
42
+ <div class="theme-toggle-container theme-toggle-header">
43
+ <button class="theme-toggle">
44
+ <div class="visually-hidden">Toggle Light / Dark / Auto color theme</div>
45
+ <svg class="theme-icon-when-auto">
46
+ <use href="#svg-sun-half"></use>
47
+ </svg>
48
+ <svg class="theme-icon-when-dark">
49
+ <use href="#svg-moon"></use>
50
+ </svg>
51
+ <svg class="theme-icon-when-light">
52
+ <use href="#svg-sun"></use>
53
+ </svg>
54
+ </button>
55
+ </div>
56
+ <label class="toc-overlay-icon toc-header-icon{% if furo_hide_toc %} no-toc{% endif %}" for="__toc">
57
+ <div class="visually-hidden">Toggle table of contents sidebar</div>
58
+ <i class="icon">
59
+ <svg>
60
+ <use href="#svg-toc"></use>
61
+ </svg>
62
+ </i>
63
+ </label>
64
+ </div>
65
+ </header>
66
+ <aside class="sidebar-drawer">
67
+ <div class="sidebar-container">
68
+ {% block left_sidebar %}
69
+ <div class="sidebar-sticky">
70
+ {%- for sidebar_section in sidebars %}
71
+ {%- include sidebar_section %}
72
+ {%- endfor %}
73
+ </div>
74
+ {% endblock left_sidebar %}
75
+ </div>
76
+ </aside>
77
+ <div class="main">
78
+ <div class="content">
79
+ <div class="article-container">
80
+ <a href="#" class="back-to-top muted-link">
81
+ <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24">
82
+ <path d="M13 20h-2V8l-5.5 5.5-1.42-1.42L12 4.16l7.92 7.92-1.42 1.42L13 8v12z"></path>
83
+ </svg>
84
+ <span>{% trans %}Back to top{% endtrans %}</span>
85
+ </a>
86
+ <div class="content-icon-container">
87
+ {#- Edit this page, on GitHub -#}
88
+ {%- if READTHEDOCS and conf_py_path and page_source_suffix and github_user != "None" and github_repo
89
+ != "None" and github_version %}
90
+ <div class="edit-this-page">
91
+ <a class="muted-link"
92
+ href="https://github.com/{{ github_user }}/{{ github_repo }}/edit/{{ github_version }}{{ conf_py_path }}{{ pagename }}{{ page_source_suffix }}"
93
+ title="{{ _(" Edit this page") }}">
94
+ <svg aria-hidden="true" viewBox="0 0 24 24" stroke-width="1.5" stroke="currentColor" fill="none"
95
+ stroke-linecap="round" stroke-linejoin="round">
96
+ <path stroke="none" d="M0 0h24v24H0z" fill="none"/>
97
+ <path d="M4 20h4l10.5 -10.5a1.5 1.5 0 0 0 -4 -4l-10.5 10.5v4"/>
98
+ <line x1="13.5" y1="6.5" x2="17.5" y2="10.5"/>
99
+ </svg>
100
+ <span class="visually-hidden">{{ _("Edit this page") }}</span>
101
+ </a>
102
+ </div>
103
+ {% endif %}
104
+ {#- Theme toggle -#}
105
+ <div class="theme-toggle-container theme-toggle-content">
106
+ <button class="theme-toggle">
107
+ <div class="visually-hidden">Toggle Light / Dark / Auto color theme</div>
108
+ <svg class="theme-icon-when-auto">
109
+ <use href="#svg-sun-half"></use>
110
+ </svg>
111
+ <svg class="theme-icon-when-dark">
112
+ <use href="#svg-moon"></use>
113
+ </svg>
114
+ <svg class="theme-icon-when-light">
115
+ <use href="#svg-sun"></use>
116
+ </svg>
117
+ </button>
118
+ </div>
119
+ <label class="toc-overlay-icon toc-content-icon{% if furo_hide_toc %} no-toc{% endif %}"
120
+ for="__toc">
121
+ <div class="visually-hidden">Toggle table of contents sidebar</div>
122
+ <i class="icon">
123
+ <svg>
124
+ <use href="#svg-toc"></use>
125
+ </svg>
126
+ </i>
127
+ </label>
128
+ </div>
129
+ <article role="main">
130
+ {% block content %}{{ body }}{% endblock %}
131
+ </article>
132
+ </div>
133
+ <footer>
134
+ {% block footer %}
135
+ <div class="related-pages">
136
+ {% if next -%}
137
+ <a class="next-page" href="{{ next.link }}">
138
+ <div class="page-info">
139
+ <div class="context">
140
+ <span>{{ _("Next") }}</span>
141
+ </div>
142
+ <div class="title">{{ next.title }}</div>
143
+ </div>
144
+ <svg class="furo-related-icon">
145
+ <use href="#svg-arrow-right"></use>
146
+ </svg>
147
+ </a>
148
+ {%- endif %}
149
+ {% if prev -%}
150
+ <a class="prev-page" href="{{ prev.link }}">
151
+ <svg class="furo-related-icon">
152
+ <use href="#svg-arrow-right"></use>
153
+ </svg>
154
+ <div class="page-info">
155
+ <div class="context">
156
+ <span>{{ _("Previous") }}</span>
157
+ </div>
158
+ {% if prev.link == pathto(master_doc) %}
159
+ <div class="title">{{ _("Home") }}</div>
160
+ {% else %}
161
+ <div class="title">{{ prev.title }}</div>
162
+ {% endif %}
163
+ </div>
164
+ </a>
165
+ {%- endif %}
166
+ </div>
167
+ <div class="bottom-of-page">
168
+ <div class="left-details">
169
+ {%- if show_copyright %}
170
+ <div class="copyright">
171
+ {%- if hasdoc('copyright') %}
172
+ {% trans path=pathto('copyright'), copyright=copyright|e -%}
173
+ <a href="{{ path }}">Copyright</a> &#169; {{ copyright }}
174
+ {%- endtrans %}
175
+ {%- else %}
176
+ {% trans copyright=copyright|e -%}
177
+ Copyright &#169; {{ copyright }}
178
+ {%- endtrans %}
179
+ {%- endif %}
180
+ </div>
181
+ {%- endif %}
182
+ {%- if last_updated -%}
183
+ <div class="last-updated">
184
+ {% trans last_updated=last_updated|e -%}
185
+ Last updated on {{ last_updated }}
186
+ {%- endtrans -%}
187
+ </div>
188
+ {%- endif %}
189
+ </div>
190
+ <div class="right-details">
191
+ <div class="social-btns">
192
+ <a class='social-btn' href="https://github.com/jina-ai/clip-as-service/" aria-label="GitHub"
193
+ target="_blank" rel="noreferrer"> <i class="fab fa-github"></i></a>
194
+ <a class='social-btn' href="https://discord.jina.ai" aria-label="Discord" target="_blank"
195
+ rel="noreferrer"> <i class="fab fa-discord"></i></a>
196
+ <a class='social-btn' href="https://youtube.com/c/jina-ai" aria-label="YouTube"
197
+ target="_blank" rel="noreferrer"> <i class="fab fa-youtube"></i></a>
198
+ <a class='social-btn' href="https://twitter.com/JinaAI_" aria-label="Twitter"
199
+ target="_blank" rel="noreferrer"> <i class="fab fa-twitter"></i></a>
200
+ <a class='social-btn' href="https://www.linkedin.com/company/jinaai/" aria-label="LinkedIn"
201
+ target="_blank" rel="noreferrer"> <i class="fab fa-linkedin"></i></a>
202
+ </div>
203
+ </div>
204
+ </div>
205
+ {% endblock footer %}
206
+ </footer>
207
+ </div>
208
+ <aside class="toc-drawer{% if furo_hide_toc %} no-toc{% endif %}">
209
+ {% block right_sidebar %}
210
+ {% if not furo_hide_toc %}
211
+ <div class="toc-sticky toc-scroll">
212
+ <div class="toc-title-container">
213
+ <span class="toc-title">
214
+ {{ _("Contents") }}
215
+ </span>
216
+ </div>
217
+ <div class="toc-tree-container">
218
+ <div class="toc-tree">
219
+ {{ toc }}
220
+ </div>
221
+ </div>
222
+ </div>
223
+ {% endif %}
224
+ {% endblock right_sidebar %}
225
+ </aside>
226
+ </div>
227
+ </div>
228
+ <img referrerpolicy="no-referrer-when-downgrade"
229
+ src="https://static.scarf.sh/a.png?x-pxid=2823e771-0e1e-4320-8fde-48bc48e53262"/>
230
+ {%- endblock %}
clip_as_service/docs/_templates/sidebar/brand.html ADDED
@@ -0,0 +1,41 @@
1
+ <a class="sidebar-brand{% if logo %} centered{% endif %}" href="{{ pathto(master_doc) }}">
2
+ {% block brand_content %}
3
+ {%- if logo_url %}
4
+ <div class="sidebar-logo-container">
5
+ <img class="sidebar-logo" src="{{ logo_url }}" alt="Logo" />
6
+ </div>
7
+ {%- endif %}
8
+ {%- if theme_light_logo and theme_dark_logo %}
9
+ <div class="sidebar-logo-container">
10
+ <img class="sidebar-logo only-light" src="{{ pathto('_static/' + theme_light_logo, 1) }}" alt="Light Logo" />
11
+ <img class="sidebar-logo only-dark" src="{{ pathto('_static/' + theme_dark_logo, 1) }}" alt="Dark Logo" />
12
+ </div>
13
+ {%- endif %}
14
+ {% if not theme_sidebar_hide_name %}
15
+ <span class="sidebar-brand-text">{{ docstitle if docstitle else project }}</span>
16
+ {%- endif %}
17
+ {% endblock brand_content %}
18
+ </a>
19
+ <div class="sd-d-flex-row sd-align-major-spaced">
20
+ <a class="github-button" href="https://github.com/jina-ai/clip-as-service" data-icon="octicon-star" data-show-count="true" aria-label="Star jina-ai/jina on GitHub" style="opacity: 0;">Star</a>
21
+ {% if versions %}
22
+ <select onChange="window.location.href=this.value" class="version-select">
23
+ {%- for item in versions|reverse %}
24
+ {% if item.name == latest_jina_version %}
25
+ {% set new_url = item.url if current_version.name == latest_jina_version else item.url | replace('/' + latest_jina_version, "") %}
26
+ {% if current_version.version == item.version %}
27
+ <option value="{{ new_url }}" selected="selected" >latest ({{ item.name }})</option>
28
+ {% else %}
29
+ <option value="{{ new_url }}" >latest({{ item.name }})</option>
30
+ {% endif %}
31
+ {% else %}
32
+ {% if current_version.version == item.version %}
33
+ <option value="{{ item.url }}" selected="selected" >{{ item.name }}</option>
34
+ {% else %}
35
+ <option value="{{ item.url }}" >{{ item.name }}</option>
36
+ {% endif %}
37
+ {% endif %}
38
+ {%- endfor %}
39
+ </select>
40
+ {% endif %}
41
+ </div>
clip_as_service/docs/_templates/sidebar/navigation.html ADDED
@@ -0,0 +1,35 @@
1
+ <div class="sidebar-tree">
2
+ {{ furo_navigation_tree }}
3
+ <p class="caption" role="heading"><span class="caption-text">Ecosystem</span></p>
4
+ <ul>
5
+ <li class="toctree-l1">
6
+ <a class="reference external" href="https://docs.jina.ai">
7
+ <img class="sidebar-ecosys-logo only-light-line" src="{{ pathto('_static/search-light.svg', 1) }}">
8
+ <img class="sidebar-ecosys-logo only-dark-line" src="{{ pathto('_static/search-dark.svg', 1) }}">
9
+ Jina</a></li>
10
+ <li class="toctree-l1"><a class="reference external" href="https://hub.jina.ai">
11
+ <img class="sidebar-ecosys-logo only-light-line" src="{{ pathto('_static/hub-light.svg', 1) }}">
12
+ <img class="sidebar-ecosys-logo only-dark-line" src="{{ pathto('_static/hub-dark.svg', 1) }}">
13
+ Jina Hub</a></li>
14
+ <li class="toctree-l1"><a class="reference external" href="https://finetuner.jina.ai">
15
+ <img class="sidebar-ecosys-logo only-light-line" src="{{ pathto('_static/finetuner-light.svg', 1) }}">
16
+ <img class="sidebar-ecosys-logo only-dark-line" src="{{ pathto('_static/finetuner-dark.svg', 1) }}">
17
+ Finetuner</a></li>
18
+ <li class="toctree-l1"><a class="reference external" href="https://docarray.jina.ai">
19
+ <img class="sidebar-ecosys-logo only-light-line" src="{{ pathto('_static/docarray-light.svg', 1) }}">
20
+ <img class="sidebar-ecosys-logo only-dark-line" src="{{ pathto('_static/docarray-dark.svg', 1) }}">
21
+ DocArray</a></li>
22
+ <li class="toctree-l1"><a class="reference internal" href="#">
23
+ <img class="sidebar-ecosys-logo only-light-line" src="{{ pathto('_static/cas-light.svg', 1) }}">
24
+ <img class="sidebar-ecosys-logo only-dark-line" src="{{ pathto('_static/cas-dark.svg', 1) }}">
25
+ CLIP-as-service</a></li>
26
+ <li class="toctree-l1"><a class="reference external" href="https://github.com/jina-ai/jcloud">
27
+ <img class="sidebar-ecosys-logo only-light-line" src="{{ pathto('_static/JCloud-light.svg', 1) }}">
28
+ <img class="sidebar-ecosys-logo only-dark-line" src="{{ pathto('_static/JCloud-dark.svg', 1) }}">
29
+ JCloud</a></li>
30
+ <li class="toctree-l1"><a class="reference external" href="https://now.jina.ai">
31
+ <img class="sidebar-ecosys-logo only-light-line" src="{{ pathto('_static/now-light.svg', 1) }}">
32
+ <img class="sidebar-ecosys-logo only-dark-line" src="{{ pathto('_static/now-dark.svg', 1) }}">
33
+ NOW</a></li>
34
+ </ul>
35
+ </div>
clip_as_service/docs/changelog/index.md ADDED
@@ -0,0 +1,22 @@
+ # Changelog
+
+ CLIP-as-service follows semantic versioning. However, before the project reaches 1.0.0, any breaking change only bumps the minor version. An automated release note is [generated on every release](https://github.com/jina-ai/clip-as-service/releases); it covers features, bug fixes, refactorings, etc.
+
+ This chapter only tracks the most important breaking changes and explains the rationale behind them.
+
+ ## 0.4.0: rename `rerank` concept to `rank`
+
+ "Reranking" is a feature introduced in 0.3.3. It allows users to rank and score `document.matches` in a cross-modal way. From 0.4.0 on, this feature and all related functions are referred to simply as "rank".
+
+ ## 0.2.0: improve the service scalability with replicas
+
+ This change is mainly intended to improve inference performance when using replicas.
+
+ Here is a short benchmark summary of the improvement (`replicas=4`):
+
+ | batch_size | before | after |
+ |------------|--------|--------|
+ | 1 | 23.74 | 18.89 |
+ | 8 | 58.88 | 30.38 |
+ | 16 | 14.96 | 91.86 |
+ | 32 | 14.78 | 101.75 |
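To make the rename concrete, a minimal sketch of the 0.4.0-style client call; the server address is a placeholder and assumes a running `clip_server`:

```python
# Minimal sketch of the renamed API (was `rerank` before 0.4.0).
from clip_client import Client
from docarray import Document

c = Client('grpc://0.0.0.0:51000')  # placeholder address
doc = Document(
    uri='https://picsum.photos/300',
    matches=[Document(text='a photo of an animal'), Document(text='a screenshot')],
)
result = c.rank([doc])
for m in result[0].matches:
    print(m.scores['clip_score'].value, m.text)
```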
clip_as_service/docs/conf.py ADDED
@@ -0,0 +1,220 @@
+ import os
+ import re
+ import sys
+ from os import path
+
+ sys.path.insert(0, path.abspath('..'))
+
+ project = 'CLIP-as-service'
+ slug = re.sub(r'\W+', '-', project.lower())
+ author = 'Jina AI'
+ copyright = 'Jina AI Limited. All rights reserved.'
+ source_suffix = ['.rst', '.md']
+ master_doc = 'index'
+ language = 'en'
+ repo_dir = '../'
+
+ try:
+     if 'CAS_VERSION' not in os.environ:
+         libinfo_py = path.join(repo_dir, 'client/clip_client', '__init__.py')
+         with open(libinfo_py, 'r') as f:
+             libinfo_content = f.readlines()
+         version_line = [
+             l.strip() for l in libinfo_content if l.startswith('__version__')
+         ][0]
+         exec(version_line)
+     else:
+         __version__ = os.environ['CAS_VERSION']
+ except FileNotFoundError:
+     __version__ = '0.0.0'
+
+ version = __version__
+ release = __version__
+
+ templates_path = ['_templates']
+ exclude_patterns = [
+     '_build',
+     'Thumbs.db',
+     '.DS_Store',
+     'tests',
+     'page_templates',
+     '.github',
+ ]
+ pygments_style = 'rainbow_dash'
+ html_theme = 'furo'
+
+ base_url = '/'
+ html_baseurl = 'https://clip-as-service.jina.ai'
+ sitemap_url_scheme = '{link}'
+ sitemap_locales = [None]
+ sitemap_filename = "sitemap.xml"
+
+ html_theme_options = {
+     'light_logo': 'logo-light.svg',
+     'dark_logo': 'logo-dark.svg',
+     "sidebar_hide_name": True,
+     "light_css_variables": {
+         "color-brand-primary": "#009191",
+         "color-brand-content": "#009191",
+     },
+     "dark_css_variables": {
+         "color-brand-primary": "#FBCB67",
+         "color-brand-content": "#FBCB67",
+     },
+     # PLEASE DO NOT DELETE the empty line between `start-announce` and `end-announce`
+     # PLEASE DO NOT DELETE `start-announce`/`end-announce`, it is used by our dev bot to inject announcements from GH
+     # start-announce
+
+     # end-announce
+ }
+
+ html_static_path = ['_static']
+ html_extra_path = ['html_extra']
+ html_css_files = [
+     'main.css',
+     'https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.0.0-beta2/css/all.min.css',
+ ]
+ html_js_files = [
+     'https://cdn.jsdelivr.net/npm/vue@2/dist/vue.min.js',
+ ]
+ htmlhelp_basename = slug
+ html_show_sourcelink = False
+ html_favicon = '_static/favicon.png'
+
+ intersphinx_mapping = {
+     'docarray': ('https://docarray.jina.ai/', None),
+     'finetuner': ('https://finetuner.jina.ai/', None),
+ }
+
+ latex_documents = [(master_doc, f'{slug}.tex', project, author, 'manual')]
+ man_pages = [(master_doc, slug, project, [author], 1)]
+ texinfo_documents = [
+     (master_doc, slug, project, author, slug, project, 'Miscellaneous')
+ ]
+ epub_title = project
+ epub_exclude_files = ['search.html']
+
+ # -- Extension configuration -------------------------------------------------
+
+ extensions = [
+     'sphinx.ext.autodoc',
+     'sphinx_autodoc_typehints',
+     'sphinx.ext.viewcode',
+     'sphinx.ext.coverage',
+     'sphinxcontrib.apidoc',
+     'sphinxarg.ext',
+     'sphinx_copybutton',
+     'sphinx_sitemap',
+     'sphinx.ext.intersphinx',
+     'sphinxext.opengraph',
+     'notfound.extension',
+     'myst_parser',
+     'sphinx_design',
+     'sphinx_inline_tabs',
+ ]
+
+ myst_enable_extensions = ['colon_fence', 'substitution', 'deflist']
+
+ # -- Custom 404 page
+
+ # sphinx-notfound-page
+ # https://github.com/readthedocs/sphinx-notfound-page
+ notfound_context = {
+     'title': 'Page Not Found',
+     'body': '''
+ <h1>Page Not Found</h1>
+ <p>Oops, we couldn't find that page.</p>
+ <p>You can try "asking our docs" on the right corner of the page to find an answer.</p>
+ <p>Otherwise, <a href="https://github.com/jina-ai/clip-as-service/">please create a GitHub issue</a> and one of our team will respond.</p>
+ ''',
+ }
+ notfound_no_urls_prefix = True
+
+ apidoc_module_dir = '../client'
+ apidoc_output_dir = 'api'
+ apidoc_excluded_paths = ['tests', 'legacy', 'hub', 'toy*', 'setup.py']
+ apidoc_separate_modules = True
+ apidoc_extra_args = ['-t', 'template/']
+ autodoc_member_order = 'bysource'
+ autodoc_mock_imports = ['argparse', 'numpy', 'np', 'tensorflow', 'torch', 'scipy']
+ autoclass_content = 'both'
+ set_type_checking_flag = False
+ html_last_updated_fmt = ''
+ nitpicky = True
+ nitpick_ignore = [('py:class', 'type')]
+ linkcheck_ignore = [
+     # Avoid link check on local URIs
+     'http://0.0.0.0:*',
+     'pods/encode.yml',
+     'https://github.com/jina-ai/clip-as-service/commit/*',
+     '.github/*',
+     'extra-requirements.txt',
+     'fastentrypoints.py',
+     '../../101',
+     '../../102',
+     'http://www.twinsun.com/tz/tz-link.htm',  # Broken link from the pytz library
+     'https://urllib3.readthedocs.io/en/latest/contrib.html#google-app-engine',  # Broken link from the urllib3 library
+     # This link works but gets a 403 error on linkcheck
+     'https://linuxize.com/post/how-to-add-swap-space-on-ubuntu-20-04/',
+ ]
+ linkcheck_timeout = 20
+ linkcheck_retries = 2
+ linkcheck_anchors = False
+
+ ogp_site_url = 'https://clip-as-service.jina.ai/'
+ ogp_image = 'https://clip-as-service.jina.ai/_static/banner.png'
+ ogp_use_first_image = True
+ ogp_description_length = 300
+ ogp_type = 'website'
+ ogp_site_name = f'CLIP-as-service {os.environ.get("SPHINX_MULTIVERSION_VERSION", version)} Documentation'
+
+ ogp_custom_meta_tags = [
+     '<meta name="twitter:card" content="summary_large_image">',
+     '<meta name="twitter:site" content="@JinaAI_">',
+     '<meta name="twitter:creator" content="@JinaAI_">',
+     '<meta name="description" content="Embed images and sentences into fixed-length vectors via CLIP.">',
+     '<meta property="og:description" content="CLIP-as-service is a low-latency high-scalability embedding service for images and texts. It can be easily integrated as a microservice into neural search solutions.">',
+     '''
+ <script async src="https://www.googletagmanager.com/gtag/js?id=G-E63SXVNDXZ"></script>
+ <script>
+   window.dataLayer = window.dataLayer || [];
+   function gtag(){dataLayer.push(arguments);}
+   gtag('js', new Date());
+
+   gtag('config', 'G-E63SXVNDXZ');
+ </script>
+
+ <script async defer src="https://buttons.github.io/buttons.js"></script>
+ ''',
+ ]
+
+
+ def add_server_address(app):
+     # This makes the variable `server_address` available to docbot.js
+     server_address = app.config['server_address']
+     js_text = "var server_address = '%s';" % server_address
+     app.add_js_file(None, body=js_text)
+
+
+ def setup(app):
+     from sphinx.domains.python import PyField
+     from sphinx.util.docfields import Field
+     from sphinx.locale import _
+
+     app.add_object_type(
+         'confval',
+         'confval',
+         objname='configuration value',
+         indextemplate='pair: %s; configuration value',
+         doc_field_types=[
+             PyField(
+                 'type',
+                 label=_('Type'),
+                 has_arg=False,
+                 names=('type',),
+                 bodyrolename='class',
+             ),
+             Field(
+                 'default',
+                 label=_('Default'),
+                 has_arg=False,
+                 names=('default',),
+             ),
+         ],
+     )
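+
+ # Usage sketch (illustrative, not part of the original file): the `confval`
+ # object type registered above lets pages document configuration values with
+ # the registered `type`/`default` fields, e.g. in reST:
+ #
+ #   .. confval:: server_address
+ #
+ #      :type: str
+ #      :default: '0.0.0.0'
+ #
+ #      The address injected into docbot.js by ``add_server_address``.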
clip_as_service/docs/hosting/by-jina.md ADDED
@@ -0,0 +1,70 @@
+ # Hosted by Jina AI
+
+ ```{include} ../../README.md
+ :start-after: <!-- start inference-banner -->
+ :end-before: <!-- end inference-banner -->
+ ```
+
+ In today's dynamic business environment, enterprises face a multitude of challenges that require advanced solutions to maintain a competitive edge.
+ From managing vast amounts of unstructured data to delivering personalized customer experiences, businesses need efficient tools to tackle these obstacles.
+ Machine learning (ML) has emerged as a powerful tool for automating repetitive tasks, processing data effectively, and generating valuable insights from multimedia content.
+ Jina AI's Inference offers a comprehensive solution that streamlines access to curated, state-of-the-art ML models, eliminating traditional roadblocks such as costly and time-consuming MLOps steps and the divide between public and custom neural network models.
+
+ ## Getting started
+
+ To access the fastest and most performant CLIP models, [Jina AI's Inference](https://cloud.jina.ai/user/inference) is the go-to choice.
+ Follow the steps below to get started:
+
+ 1. Sign up for a free account at [Jina AI Cloud](https://cloud.jina.ai).
+ 2. Once you have created an account, navigate to the Inference tab to create a new CLIP model.
+ 3. The model can be accessed either through an HTTP endpoint or a gRPC endpoint.
+
+ ## Obtaining a Personal Access Token
+
+ Before you begin using [Jina AI's Inference](https://cloud.jina.ai/user/inference), ensure that you have obtained a personal access token (PAT) from the [Jina AI Cloud](https://cloud.jina.ai) or through the command-line interface (CLI).
+ Use the following guide to create a new PAT:
+
+ 1. Access the [Jina AI Cloud](https://cloud.jina.ai) and log in to your account.
+ 2. Navigate to the [**Access token**](https://cloud.jina.ai/settings/tokens) section in the **Settings** tab, or alternatively, create a PAT via the CLI using the command:
+
+ ```bash
+ jina auth token create <name of PAT> -e <expiration days>
+ ```
+
+ ## Installing the Inference Client
+
+ To interact with the model created in Inference, you will need to install the `inference-client` Python package.
+ Install the package using pip:
+
+ ```bash
+ pip install inference-client
+ ```
+
+ ## Interacting with the Model
+
+ Once you have your personal access token and the model name listed in the Inference detail page, you can start interacting with the model using the `inference-client` Python package.
+ Follow the example code snippet below:
+
+ ```python
+ from inference_client import Client
+
+ client = Client(token='<your auth token>')
+
+ model = client.get_model('<your model name>')
+ ```
+
+ The CLIP models offer the following functionalities (a rough usage sketch follows the list):
+
+ 1. Encoding: encode data by calling the `model.encode` method. For detailed instructions on using this method, refer to the [Encode documentation](https://jina.readme.io/docs/encode).
+ 2. Ranking: perform ranking by calling the `model.rank` method. Refer to the [Rank documentation](https://jina.readme.io/docs/rank) for detailed instructions on using this method.
+
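+ The sketch below is illustrative only: the exact parameter names are assumptions, so consult the Encode and Rank documentation linked above for the authoritative signatures.
+
+ ```python
+ # Hypothetical sketch -- parameter names are assumptions, see the linked docs.
+ embeddings = model.encode(text=['a photo of a cat', 'a photo of a dog'])
+
+ ranked = model.rank(
+     image='https://example.com/cat.jpg',  # hypothetical reference image
+     text=['a cat', 'a dog'],  # hypothetical candidate captions
+ )
+ ```
+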
+ For further details on usage and information about other tasks and models supported in Inference, as well as how to use `curl` to interact with the model, please consult the [Inference documentation](https://jina.readme.io/docs/inference).
clip_as_service/docs/hosting/cas-on-colab.svg ADDED
clip_as_service/docs/hosting/colab-banner.png ADDED

Git LFS Details

  • SHA256: e7701cf44db53431dab93c82cf138b632c3fea613456a99fbcb5a74297d250b3
  • Pointer size: 131 Bytes
  • Size of remote file: 419 kB
clip_as_service/docs/hosting/colab.md ADDED
@@ -0,0 +1,37 @@
+ # Host on Google Colab
+
+ ```{figure} https://clip-as-service.jina.ai/_images/colab-banner.png
+ :width: 0 %
+ :scale: 0 %
+ ```
+
+ ```{figure} colab-banner.png
+ :scale: 0 %
+ :width: 0 %
+ ```
+
+ As [Jina is fully compatible with Google Colab](https://docs.jina.ai/how-to/google-colab/), CLIP-as-service runs smoothly on Colab as well. You can host `clip_server` on Google Colab, leveraging its free GPU/TPU resources to serve up to four replicas of `ViT-L/14-336px`. You can then send requests from your local machine to the server for embedding, ranking and reasoning tasks, for example:
+
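+ ```python
+ from clip_client import Client
+
+ # A minimal sketch: the address below is a placeholder -- use the gRPC
+ # endpoint printed by the Colab notebook linked further down this page.
+ c = Client('grpc://<address-from-colab>:51000')
+ print(c.encode(['First do it', 'then do it right']).shape)
+ ```
+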
+ Specifically, the architecture is illustrated below:
+
+ ```{figure} cas-on-colab.svg
+ :width: 70%
+ ```
+
+ ```{button-link} https://colab.research.google.com/github/jina-ai/clip-as-service/blob/main/docs/hosting/cas-on-colab.ipynb
+ :color: primary
+ :align: center
+
+ {octicon}`link-external` Open the notebook on Google Colab
+ ```
+
+ Please follow the walk-through there. Enjoy the free GPU/TPU to build your awesome Jina applications!
+
+ ```{tip}
+ Hosting a service on Google Colab is not recommended if your server aims to be long-lived or permanent. Colab is best suited for quick experiments, demonstrations, or leveraging its free GPU/TPU. For a stable service, please deploy the CLIP model on your own server.
+ ```
clip_as_service/docs/hosting/jc-deploy.png ADDED

Git LFS Details

  • SHA256: 364d74ec4c81c0cd6e5e434c5fee670602fba8b014853cd7aa3db84394494668
  • Pointer size: 130 Bytes
  • Size of remote file: 32.9 kB
clip_as_service/docs/hosting/on-jcloud.md ADDED
@@ -0,0 +1,78 @@
+ # Host on JCloud
+
+ Essentially, `clip_server` is a Jina [Flow](https://docs.jina.ai/fundamentals/flow/). Any Jina Flow can be hosted on [JCloud](https://docs.jina.ai/fundamentals/jcloud/), hence `clip_server` can be hosted on JCloud as well. Learn more about [JCloud here](https://docs.jina.ai/fundamentals/jcloud/).
+
+ First, you need a Flow YAML file for the deployment. A minimal YAML file is as follows:
+
+ ````{tab} torch-flow.yml
+
+ ```yaml
+ jtype: Flow
+ executors:
+   - uses: jinahub+docker://CLIPTorchEncoder
+ ```
+
+ ````
+ ````{tab} onnx-flow.yml
+
+ ```yaml
+ jtype: Flow
+ executors:
+   - uses: jinahub+docker://CLIPOnnxEncoder
+ ```
+
+ ````
+
+ ```{tip}
+ `port` is unnecessary here as JCloud will assign a new hostname and port for any deployed service.
+ ```
+
+ Executors must start with `jinahub+docker://` as required by JCloud. We currently provide the containerized executors [`jinahub+docker://CLIPTorchEncoder`](https://cloud.jina.ai/executor/gzpbl8jh) and [`jinahub+docker://CLIPOnnxEncoder`](https://cloud.jina.ai/executor/2a7auwg2) on Jina Hub. They are automatically synced with each new release of the `clip_server` module.
+
+ To enable GPU on JCloud, you need to configure it in the YAML file and use the prebuilt GPU Docker images. For example:
+
+ ```yaml
+ jtype: Flow
+ executors:
+   - uses: jinahub+docker://CLIPTorchEncoder/latest-gpu
+     jcloud:
+       resources:
+         gpu: shared
+ ```
+
+ Please refer [here](https://docs.jina.ai/fundamentals/jcloud/yaml-spec/#gpu) for more details on using GPU in JCloud.
+ Note that you must specify a GPU Docker-image tag, such as `latest-gpu`, for your executor to utilize the GPU.
+ See the 'Tag' section of [CLIPTorchEncoder](https://cloud.jina.ai/executor/gzpbl8jh) and [CLIPOnnxEncoder](https://cloud.jina.ai/executor/2a7auwg2) for the available GPU tags.
+
+ To deploy:
+
+ ````{tab} PyTorch-backed
+ ```bash
+ jc deploy torch-flow.yml
+ ```
+ ````
+
+ ````{tab} ONNX-backed
+ ```bash
+ jc deploy onnx-flow.yml
+ ```
+ ````
+
+ If the Flow is successfully deployed, you will see:
+
+ ```{figure} jc-deploy.png
+ :width: 60%
+ ```
+
+ You can now connect to it from a client by setting `server` to the URL given by JCloud:
+
+ ```python
+ from clip_client import Client
+
+ c = Client(
+     'grpcs://174eb69ba3.wolf.jina.ai'
+ )  # This is the URL you get from the previous step
+ c.profile()
+ ```
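+
+ Once connected, the client works exactly as it does against a local server. For example, a quick embedding call (the image URL below is a placeholder; any accessible image URI works):
+
+ ```python
+ # Mixed text and image inputs; returns one fixed-length vector per input.
+ r = c.encode(['hello world', 'https://example.com/cat.jpg'])
+ print(r.shape)
+ ```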
clip_as_service/docs/html_extra/robots.txt ADDED
@@ -0,0 +1,2 @@
+ User-agent: *
+ sitemap: https://clip-as-service.jina.ai/sitemap.xml
clip_as_service/docs/index.md ADDED
@@ -0,0 +1,176 @@
+ # Welcome to CLIP-as-service!
+
+ ```{include} ../README.md
+ :start-after: <!-- start elevator-pitch -->
+ :end-before: <!-- end elevator-pitch -->
+ ```
+
+ ## Install
+
+ ![PyPI](https://img.shields.io/pypi/v/clip_client?color=%23ffffff&label=%20) is the latest version.
+
+ Make sure you are using Python 3.7+. You can install the client and server independently. It is **not required** to install both: e.g. you can install `clip_server` on a GPU machine and `clip_client` on a local laptop.
+
+ ````{tab} Client
+
+ ```bash
+ pip install clip-client
+ ```
+
+ ````
+
+ ````{tab} Server (PyTorch)
+
+ ```bash
+ pip install clip-server
+ ```
+ ````
+
+ ````{tab} Server (ONNX)
+
+ ```bash
+ pip install "clip_server[onnx]"
+ ```
+
+ ````
+
+ ````{tab} Server (TensorRT)
+
+ ```bash
+ pip install nvidia-pyindex
+ pip install "clip_server[tensorrt]"
+ ```
+ ````
+
+ ````{tab} Server on Google Colab
+
+ ```{button-link} https://colab.research.google.com/github/jina-ai/clip-as-service/blob/main/docs/hosting/cas-on-colab.ipynb
+ :color: primary
+ :align: center
+
+ {octicon}`link-external` Open the notebook on Google Colab
+ ```
+
+ ````
+
+
+ ## Quick check
+
+ After installing, you can run the following commands for a quick connectivity check.
+
+ ### Start the server
+
+ ````{tab} Start PyTorch Server
+ ```bash
+ python -m clip_server
+ ```
+ ````
+
+ ````{tab} Start ONNX Server
+ ```bash
+ python -m clip_server onnx-flow.yml
+ ```
+ ````
+
+ ````{tab} Start TensorRT Server
+ ```bash
+ python -m clip_server tensorrt-flow.yml
+ ```
+ ````
+
+ The first time you start the server, it will download the default pretrained model, which may take a while depending on your network speed. Afterwards you will get address information similar to the following:
+
+ ```text
+ ╭────────────── 🔗 Endpoint ───────────────╮
+ │ 🔗 Protocol GRPC │
+ │ 🏠 Local 0.0.0.0:51000 │
+ │ 🔒 Private 192.168.31.62:51000 │
+ │ 🌍 Public 87.105.159.191:51000 │
+ ╰──────────────────────────────────────────╯
+ ```
+
+ This means the server is ready to serve. Note down the three addresses shown above; you will need them later.
+
+ ### Connect from client
+
+ ```{tip}
+ Depending on the locations of the client and server, you may use different IP addresses:
+ - Client and server are on the same machine: use the local address, e.g. `0.0.0.0`
+ - Client and server are connected to the same router: use the private network address, e.g. `192.168.31.62`
+ - Server is in a public network: use the public network address, e.g. `87.105.159.191`
+ ```
+
+ Run the following Python script:
+
+ ```python
+ from clip_client import Client
+
+ c = Client('grpc://0.0.0.0:51000')
+ c.profile()
+ ```
+
+ This will give you:
+
+ ```text
+  Roundtrip  16ms  100%
+ ├──  Client-server network  8ms  49%
+ └──  Server  8ms  51%
+     ├──  Gateway-CLIP network  2ms  25%
+     └──  CLIP model  6ms  75%
+ {'Roundtrip': 15.684750003856607, 'Client-server network': 7.684750003856607, 'Server': 8, 'Gateway-CLIP network': 2, 'CLIP model': 6}
+ ```
+
+ This means the client and the server are now connected. Well done!
+
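+ As a quick follow-up, you can embed a few sentences and an image in one call. Inputs may mix plain sentences and image URIs (the image URL below is a placeholder; any accessible URI works):
+
+ ```python
+ # Each input row -- text or image -- becomes one fixed-length vector.
+ r = c.encode(
+     [
+         'First do it',
+         'then do it right',
+         'https://picsum.photos/200',  # placeholder image URL
+     ]
+ )
+ print(r.shape)
+ ```
+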
+ ```{include} ../README.md
+ :start-after: <!-- start support-pitch -->
+ :end-before: <!-- end support-pitch -->
+ ```
+
+
+ ```{toctree}
+ :caption: User Guides
+ :hidden:
+
+ user-guides/client
+ user-guides/server
+ user-guides/benchmark
+ user-guides/retriever
+ user-guides/faq
+ ```
+
+ ```{toctree}
+ :caption: Hosting
+ :hidden:
+
+ hosting/by-jina
+ hosting/colab
+ hosting/on-jcloud
+ ```
+
+ ```{toctree}
+ :caption: Playground
+ :hidden:
+
+ playground/embedding
+ playground/reasoning
+ playground/searching
+ ```
+
+
+ ```{toctree}
+ :caption: Developer References
+ :hidden:
+ :maxdepth: 1
+
+ api/clip_client
+ ```
+
+
+ ---
+ {ref}`genindex` | {ref}`modindex`
+
clip_as_service/docs/makedoc.sh ADDED
@@ -0,0 +1,7 @@
+ #!/usr/bin/env bash
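+
+ # Echo each command and exit on the first failure (set -ex); remove the
+ # generated API pages and any previous build output, then render the docs
+ # in dirhtml format.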
+
+ set -ex
+
+ rm -rf api && make clean
+
+ make dirhtml
clip_as_service/docs/playground/embedding.md ADDED
@@ -0,0 +1,16 @@
+ # Text & Image Embedding
+
+ Embedding is a basic task in CLIP-as-service: it converts your input sentence or image into a fixed-length vector. In this demo, you can choose a picture, input a sentence in the textbox, or copy-paste your image URL into the text box to get a rough feeling of how CLIP-as-service works. If you prefer code over a playground, a minimal client-side sketch follows below.
+
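+ ```python
+ import numpy as np
+ from clip_client import Client
+
+ # A minimal sketch; assumes a clip_server running locally at port 51000.
+ c = Client('grpc://0.0.0.0:51000')
+
+ # Embed one sentence and one image (placeholder URL), then compare them with
+ # cosine similarity -- the same signal the demo below visualizes.
+ text_vec, img_vec = c.encode(['a photo of a cat', 'https://picsum.photos/200'])
+ cos = np.dot(text_vec, img_vec) / (np.linalg.norm(text_vec) * np.linalg.norm(img_vec))
+ print(f'cosine similarity: {cos:.3f}')
+ ```
+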
+ This is *not* a search task: the images are random stock images and are not related to any search result; they mainly save you the time of hunting down random internet cat pictures.
+
+ The model behind this demo is `ViT-L/14-336px` running on one GPU.
+
+ <iframe frameborder="0" allowtransparency="true" scrolling="no" src="../../_static/demo-embed.html" style="overflow:hidden;overflow-x:hidden;overflow-y:hidden;height:100vh;width:100%"></iframe>
+
+ ```{button-link} ../../_static/demo-embed.html
+ :color: primary
+ :align: center
+
+ {octicon}`link-external` Open this playground in a new window
+ ```