AIencoder committed on
Commit
12dd28c
·
verified ·
1 Parent(s): a61623a

Update requirements.txt

Browse files
Files changed (1) hide show
  1. requirements.txt +6 -7
requirements.txt CHANGED
@@ -1,10 +1,10 @@
1
  # Gradio 5 (avoids 4.x's Starlette template signature breakage).
2
  gradio>=5.5,<6.0
3
 
4
- # CRITICAL: gradio 5.5's oauth.py still does `from huggingface_hub import
5
- # HfFolder` — HfFolder was removed in huggingface_hub 1.0. Pin <1.0.
6
- # (Newer gradio 5.20+ uses get_token instead, but the 5.5 LTS line we use
7
- # is more conservative and well-tested on Spaces.)
8
  huggingface_hub>=0.24,<1.0
9
 
10
  matplotlib>=3.7
@@ -12,6 +12,5 @@ numpy>=1.24
12
  torch>=2.0
13
  pillow>=10.0
14
 
15
- # Prebuilt llama-cpp-python wheel from AIencoder/llama-cpp-wheels.
16
- # CPU-only, AVX2 + FMA + F16C — works on every HF Space x86_64 host.
17
- https://huggingface.co/datasets/AIencoder/llama-cpp-wheels/resolve/main/llama_cpp_python-0.3.16%2Bbasic_avx2_fma_f16c-cp312-cp312-manylinux_2_31_x86_64.whl
 
1
  # Gradio 5 (avoids 4.x's Starlette template signature breakage).
2
  gradio>=5.5,<6.0
3
 
4
+ # Pin Pydantic to fix the "bool is not iterable" error
5
+ pydantic==2.10.6
6
+
7
+ # CRITICAL: huggingface_hub pin for Gradio 5.5 compatibility
8
  huggingface_hub>=0.24,<1.0
9
 
10
  matplotlib>=3.7
 
12
  torch>=2.0
13
  pillow>=10.0
14
 
15
+ # Prebuilt llama-cpp-python wheel
16
+ https://huggingface.co/datasets/AIencoder/llama-cpp-wheels/resolve/main/llama_cpp_python-0.3.16%2Bbasic_avx2_fma_f16c-cp312-cp312-manylinux_2_31_x86_64.whl