# Python dependencies (pip requirements file)
# Flash Attention - CUDA 12, PyTorch 2.6, Python 3.10
flash-attn @ https://github.com/Dao-AILab/flash-attention/releases/download/v2.7.3/flash_attn-2.7.3+cu12torch2.6cxx11abiFALSE-cp310-cp310-linux_x86_64.whl

# Core ML/AI Libraries
torch==2.6.0
torchvision
accelerate>=0.24.0

# Transformers - using version compatible with both sets of models
transformers==4.57.1
tokenizers>=0.20.3
transformers-stream-generator

# Hugging Face
huggingface_hub
hf_xet
spaces>=0.20.0

# Model architectures and utilities
xformers
einops
peft

# Vision & Image Processing
qwen-vl-utils
albumentations
opencv-python
pyvips
pyvips-binary
Pillow>=10.0.0
timm
supervision

# Document Processing
docling-core
python-docx
pymupdf
pdf2image
markdown
html2text

# PDF Generation
reportlab
fpdf

# Text Processing
sentencepiece>=0.1.99
num2words
protobuf>=3.20.0

# Utilities
loguru
requests
httpx
click
addict
easydict
safetensors>=0.4.0

# Web Interface
gradio>=4.0.0

# Video Processing
av