Zeph27 committed on
Commit eb2a682 • 1 Parent(s): e4207af

update reqs

Files changed (2)
  1. app.py +0 -3
  2. requirements.txt +4 -1
app.py CHANGED
@@ -5,9 +5,6 @@ from decord import VideoReader, cpu
 import os
 import spaces
 
-import subprocess
-subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
-
 # Load the model and tokenizer
 model_name = "openbmb/MiniCPM-V-2_6-int4"
 tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
requirements.txt CHANGED
@@ -6,4 +6,7 @@ sentencepiece==0.1.99
 accelerate==0.30.1
 bitsandbytes==0.43.1
 decord
-numpy<2
+numpy<2
+https://github.com/Dao-AILab/flash-attention/releases/download/v2.6.2/flash_attn-2.6.2+cu123torch2.1cxx11abiFALSE-cp310-cp310-linux_x86_64.whl
+http://thunlp.oss-cn-qingdao.aliyuncs.com/multi_modal/never_delete/modelscope_studio-0.4.0.9-py3-none-any.whl
+opencv-python
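
For reference, a minimal sketch of the model-loading code that the app.py hunk above is part of. Only the tokenizer line and model_name are visible in the diff; the AutoModel call and eval() step below are assumptions based on common transformers usage for this checkpoint, not part of this commit.

# Minimal sketch (assumption, not from this commit): with the flash-attn wheel now
# pinned in requirements.txt, no runtime pip install is needed before loading.
from transformers import AutoModel, AutoTokenizer

model_name = "openbmb/MiniCPM-V-2_6-int4"
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
# The int4 checkpoint relies on bitsandbytes (listed in requirements.txt) for quantized weights.
model = AutoModel.from_pretrained(model_name, trust_remote_code=True)
model.eval()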