willsh1997 committed
Commit 2a10ca6 · 1 Parent(s): 986d421

:bug: remove mps code

Files changed (1)
  1. linkedin_gradio.py +1 -1
linkedin_gradio.py CHANGED
@@ -11,7 +11,7 @@ from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
 
 
 # quantization_config = BitsAndBytesConfig(load_in_4bit=True)
-torch_device = "cuda" if torch.cuda.is_available() else ("mps" if torch.mps.is_available() else "cpu")
+torch_device = "cuda" if torch.cuda.is_available() else "cpu"
 
 torch_dtype = torch.bfloat16 if torch_device in ["cuda", "mps"] else torch.float32
 
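
For context, a minimal standalone sketch of the device/dtype selection as it stands after this commit (assumes only torch is installed; note that the "mps" entry in the dtype check becomes unreachable once the MPS branch is removed):

import torch

# Post-commit device selection: CUDA if available, otherwise CPU
# (the MPS fallback was removed in this commit).
torch_device = "cuda" if torch.cuda.is_available() else "cpu"

# bfloat16 on CUDA, float32 on CPU; "mps" is still listed here but can
# no longer be selected after this change.
torch_dtype = torch.bfloat16 if torch_device in ["cuda", "mps"] else torch.float32

print(torch_device, torch_dtype)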