Locutusque committed on
Commit
dec3cbd
1 Parent(s): f78292a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -4
app.py CHANGED
@@ -5,10 +5,8 @@ import subprocess
5
  import spaces
6
 
7
 
8
- @spaces.GPU
9
- def _build_flash_attn():
10
- subprocess.check_call("pip install flash-attn", shell=True)
11
- _build_flash_attn() # This is how we'll build flash-attn.
12
  # Initialize the model pipeline
13
  generator = pipeline('text-generation', model='mistralai/Mistral-7B-v0.1', torch_dtype=torch.bfloat16, use_flash_attention_2=True)
14
  @spaces.GPU
 
5
  import spaces
6
 
7
 
8
+ # Install flash-attn
9
+ subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
 
 
10
  # Initialize the model pipeline
11
  generator = pipeline('text-generation', model='mistralai/Mistral-7B-v0.1', torch_dtype=torch.bfloat16, use_flash_attention_2=True)
12
  @spaces.GPU