ruslanmv committed
Commit
d5c08fc
1 Parent(s): 08c8208

Update app.py

Files changed (1)
  1. app.py +16 -2
app.py CHANGED
@@ -1,7 +1,21 @@
  import os
  os.system("pip3 install torch==2.2.1 torchvision torchaudio xformers --index-url https://download.pytorch.org/whl/cu121")
- os.system("git clone https://github.com/TimDettmers/bitsandbytes.git")
- os.system("cd bitsandbytes/ && pip install -r requirements-dev.txt && cmake -DCOMPUTE_BACKEND=cuda -S . && make && pip install .")
+ import torch
+ major_version, minor_version = torch.cuda.get_device_capability()
+ # Must install separately since Colab has torch 2.2.1, which breaks packages
+ !pip install "unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git"
+ if major_version >= 8:
+     # Use this for new GPUs like Ampere, Hopper GPUs (RTX 30xx, RTX 40xx, A100, H100, L40)
+     !pip install --no-deps packaging ninja einops flash-attn xformers trl peft \
+         accelerate bitsandbytes
+ else:
+     # Use this for older GPUs (V100, Tesla T4, RTX 20xx)
+     !pip install --no-deps trl peft accelerate bitsandbytes
+ pass
+
+
+ #os.system("git clone https://github.com/TimDettmers/bitsandbytes.git")
+ #os.system("cd bitsandbytes/ && pip install -r requirements-dev.txt && cmake -DCOMPUTE_BACKEND=cuda -S . && make && pip install .")
  # Check if GPU is available
  import gradio as gr
  from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
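Note: the added "!pip install ..." lines use IPython/Colab shell-escape syntax, which a plain "python app.py" run would reject as a SyntaxError. Below is a minimal sketch of the same conditional install expressed with os.system, matching the style already used at the top of app.py; the package lists are copied from the diff, and this sketch is not part of the commit.

import os
import torch

# Unsloth is installed separately because the pinned torch 2.2.1 breaks its packaged dependencies
# (same reasoning as the comment in the diff above).
os.system('pip install "unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git"')

# Compute capability >= 8 means Ampere or newer (RTX 30xx/40xx, A100, H100, L40),
# which can use flash-attn and xformers; older GPUs (V100, Tesla T4, RTX 20xx) skip them.
major_version, minor_version = torch.cuda.get_device_capability()
if major_version >= 8:
    os.system("pip install --no-deps packaging ninja einops flash-attn xformers trl peft accelerate bitsandbytes")
else:
    os.system("pip install --no-deps trl peft accelerate bitsandbytes")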