# This file was autogenerated by uv via the following command:
# uv pip compile requirements.in -o requirements.txt
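#
# The requirements.in input is not included here. A minimal sketch of it,
# inferred from the "-r requirements.in" annotations in this lockfile
# (datasets, llama-index, outlines, rich, vllm), would be:
#
#   datasets
#   llama-index
#   outlines
#   rich
#   vllm
#
# The actual requirements.in may carry version constraints that are not
# recoverable from the pinned versions below.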
aiohttp==3.9.5
# via
# datasets
# fsspec
# llama-index-core
# llama-index-legacy
# vllm
aiosignal==1.3.1
# via
# aiohttp
# ray
annotated-types==0.7.0
# via pydantic
anyio==4.4.0
# via
# httpx
# openai
# starlette
# watchfiles
async-timeout==4.0.3
# via aiohttp
attrs==23.2.0
# via
# aiohttp
# jsonschema
# referencing
beautifulsoup4==4.12.3
# via llama-index-readers-file
certifi==2024.6.2
# via
# httpcore
# httpx
# requests
charset-normalizer==3.3.2
# via requests
click==8.1.7
# via
# nltk
# ray
# typer
# uvicorn
cloudpickle==3.0.0
# via outlines
cmake==3.29.5.1
# via vllm
dataclasses-json==0.6.7
# via
# llama-index-core
# llama-index-legacy
datasets==2.19.2
# via
# -r requirements.in
# outlines
deprecated==1.2.14
# via
# llama-index-core
# llama-index-legacy
dill==0.3.7
# via
# datasets
# multiprocess
dirtyjson==1.0.8
# via
# llama-index-core
# llama-index-legacy
diskcache==5.6.3
# via outlines
distro==1.9.0
# via openai
dnspython==2.6.1
# via email-validator
email-validator==2.1.1
# via fastapi
exceptiongroup==1.2.1
# via anyio
fastapi==0.111.0
# via vllm
fastapi-cli==0.0.4
# via fastapi
filelock==3.15.1
# via
# datasets
# huggingface-hub
# ray
# torch
# transformers
# triton
# vllm
frozenlist==1.4.1
# via
# aiohttp
# aiosignal
# ray
fsspec==2024.3.1
# via
# datasets
# huggingface-hub
# llama-index-core
# llama-index-legacy
# torch
greenlet==3.0.3
# via sqlalchemy
h11==0.14.0
# via
# httpcore
# uvicorn
httpcore==1.0.5
# via httpx
httptools==0.6.1
# via uvicorn
httpx==0.27.0
# via
# fastapi
# llama-index-core
# llama-index-legacy
# llamaindex-py-client
# openai
huggingface-hub==0.23.3
# via
# datasets
# tokenizers
# transformers
idna==3.7
# via
# anyio
# email-validator
# httpx
# requests
# yarl
interegular==0.3.3
# via
# lm-format-enforcer
# outlines
jinja2==3.1.4
# via
# fastapi
# outlines
# torch
joblib==1.4.2
# via nltk
jsonschema==4.22.0
# via
# outlines
# ray
jsonschema-specifications==2023.12.1
# via jsonschema
lark==1.1.9
# via outlines
llama-index==0.10.44
# via -r requirements.in
llama-index-agent-openai==0.2.7
# via
# llama-index
# llama-index-program-openai
llama-index-cli==0.1.12
# via llama-index
llama-index-core==0.10.44
# via
# llama-index
# llama-index-agent-openai
# llama-index-cli
# llama-index-embeddings-openai
# llama-index-indices-managed-llama-cloud
# llama-index-llms-openai
# llama-index-multi-modal-llms-openai
# llama-index-program-openai
# llama-index-question-gen-openai
# llama-index-readers-file
# llama-index-readers-llama-parse
# llama-parse
llama-index-embeddings-openai==0.1.10
# via
# llama-index
# llama-index-cli
llama-index-indices-managed-llama-cloud==0.1.6
# via llama-index
llama-index-legacy==0.9.48
# via llama-index
llama-index-llms-openai==0.1.22
# via
# llama-index
# llama-index-agent-openai
# llama-index-cli
# llama-index-multi-modal-llms-openai
# llama-index-program-openai
# llama-index-question-gen-openai
llama-index-multi-modal-llms-openai==0.1.6
# via llama-index
llama-index-program-openai==0.1.6
# via
# llama-index
# llama-index-question-gen-openai
llama-index-question-gen-openai==0.1.3
# via llama-index
llama-index-readers-file==0.1.25
# via llama-index
llama-index-readers-llama-parse==0.1.4
# via llama-index
llama-parse==0.4.4
# via llama-index-readers-llama-parse
llamaindex-py-client==0.1.19
# via
# llama-index-core
# llama-index-indices-managed-llama-cloud
llvmlite==0.42.0
# via numba
lm-format-enforcer==0.10.1
# via vllm
markdown-it-py==3.0.0
# via rich
markupsafe==2.1.5
# via jinja2
marshmallow==3.21.3
# via dataclasses-json
mdurl==0.1.2
# via markdown-it-py
mpmath==1.3.0
# via sympy
msgpack==1.0.8
# via ray
multidict==6.0.5
# via
# aiohttp
# yarl
multiprocess==0.70.15
# via datasets
mypy-extensions==1.0.0
# via typing-inspect
nest-asyncio==1.6.0
# via
# llama-index-core
# llama-index-legacy
# outlines
networkx==3.2.1
# via
# llama-index-core
# llama-index-legacy
# torch
ninja==1.11.1.1
# via vllm
nltk==3.8.1
# via
# llama-index-core
# llama-index-legacy
numba==0.59.1
# via outlines
numpy==1.26.4
# via
# datasets
# llama-index-core
# llama-index-legacy
# numba
# outlines
# pandas
# pyarrow
# transformers
# vllm
# xformers
nvidia-cublas-cu12==12.1.3.1
# via
# nvidia-cudnn-cu12
# nvidia-cusolver-cu12
# torch
nvidia-cuda-cupti-cu12==12.1.105
# via torch
nvidia-cuda-nvrtc-cu12==12.1.105
# via torch
nvidia-cuda-runtime-cu12==12.1.105
# via torch
nvidia-cudnn-cu12==8.9.2.26
# via torch
nvidia-cufft-cu12==11.0.2.54
# via torch
nvidia-curand-cu12==10.3.2.106
# via torch
nvidia-cusolver-cu12==11.4.5.107
# via torch
nvidia-cusparse-cu12==12.1.0.106
# via
# nvidia-cusolver-cu12
# torch
nvidia-ml-py==12.555.43
# via vllm
nvidia-nccl-cu12==2.20.5
# via torch
nvidia-nvjitlink-cu12==12.5.40
# via
# nvidia-cusolver-cu12
# nvidia-cusparse-cu12
nvidia-nvtx-cu12==12.1.105
# via torch
openai==1.34.0
# via
# llama-index-agent-openai
# llama-index-core
# llama-index-legacy
# vllm
orjson==3.10.4
# via fastapi
outlines==0.0.43
# via
# -r requirements.in
# vllm
packaging==24.1
# via
# datasets
# huggingface-hub
# lm-format-enforcer
# marshmallow
# ray
# transformers
pandas==2.2.2
# via
# datasets
# llama-index-core
# llama-index-legacy
pillow==10.3.0
# via
# llama-index-core
# vllm
prometheus-client==0.20.0
# via
# prometheus-fastapi-instrumentator
# vllm
prometheus-fastapi-instrumentator==7.0.0
# via vllm
protobuf==5.27.1
# via ray
psutil==5.9.8
# via vllm
py-cpuinfo==9.0.0
# via vllm
pyairports==2.1.1
# via outlines
pyarrow==16.1.0
# via datasets
pyarrow-hotfix==0.6
# via datasets
pycountry==24.6.1
# via outlines
pydantic==2.7.4
# via
# fastapi
# llamaindex-py-client
# lm-format-enforcer
# openai
# outlines
# vllm
pydantic-core==2.18.4
# via pydantic
pygments==2.18.0
# via rich
pypdf==4.2.0
# via llama-index-readers-file
python-dateutil==2.9.0.post0
# via pandas
python-dotenv==1.0.1
# via uvicorn
python-multipart==0.0.9
# via fastapi
pytz==2024.1
# via pandas
pyyaml==6.0.1
# via
# datasets
# huggingface-hub
# llama-index-core
# lm-format-enforcer
# ray
# transformers
# uvicorn
ray==2.24.0
# via vllm
referencing==0.35.1
# via
# jsonschema
# jsonschema-specifications
# outlines
regex==2024.5.15
# via
# nltk
# tiktoken
# transformers
requests==2.32.3
# via
# datasets
# huggingface-hub
# llama-index-core
# llama-index-legacy
# outlines
# ray
# tiktoken
# transformers
# vllm
rich==13.7.1
# via
# -r requirements.in
# typer
rpds-py==0.18.1
# via
# jsonschema
# referencing
safetensors==0.4.3
# via transformers
sentencepiece==0.2.0
# via vllm
shellingham==1.5.4
# via typer
six==1.16.0
# via python-dateutil
sniffio==1.3.1
# via
# anyio
# httpx
# openai
soupsieve==2.5
# via beautifulsoup4
sqlalchemy==2.0.30
# via
# llama-index-core
# llama-index-legacy
starlette==0.37.2
# via
# fastapi
# prometheus-fastapi-instrumentator
striprtf==0.0.26
# via llama-index-readers-file
sympy==1.12.1
# via torch
tenacity==8.3.0
# via
# llama-index-core
# llama-index-legacy
tiktoken==0.7.0
# via
# llama-index-core
# llama-index-legacy
# vllm
tokenizers==0.19.1
# via
# transformers
# vllm
torch==2.3.0
# via
# vllm
# vllm-flash-attn
# xformers
tqdm==4.66.4
# via
# datasets
# huggingface-hub
# llama-index-core
# nltk
# openai
# outlines
# transformers
transformers==4.41.2
# via vllm
triton==2.3.0
# via torch
typer==0.12.3
# via fastapi-cli
typing-extensions==4.12.2
# via
# anyio
# fastapi
# huggingface-hub
# llama-index-core
# llama-index-legacy
# openai
# pydantic
# pydantic-core
# pypdf
# sqlalchemy
# starlette
# torch
# typer
# typing-inspect
# uvicorn
# vllm
typing-inspect==0.9.0
# via
# dataclasses-json
# llama-index-core
# llama-index-legacy
tzdata==2024.1
# via pandas
ujson==5.10.0
# via fastapi
urllib3==2.2.1
# via requests
uvicorn==0.30.1
# via
# fastapi
# vllm
uvloop==0.19.0
# via uvicorn
vllm==0.5.0
# via -r requirements.in
vllm-flash-attn==2.5.9
# via vllm
watchfiles==0.22.0
# via uvicorn
websockets==12.0
# via uvicorn
wrapt==1.16.0
# via
# deprecated
# llama-index-core
xformers==0.0.26.post1
# via vllm
xxhash==3.4.1
# via datasets
yarl==1.9.4
# via aiohttp
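#
# Usage sketch: to reproduce this environment from the lockfile (assuming
# uv or pip is available in a fresh virtual environment):
#
#   uv pip sync requirements.txt        # install exactly these pins
#   # or, with plain pip:
#   pip install -r requirements.txt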