ffreemt committed on
Commit
e53c8cf
1 Parent(s): 6ba78c1

Update torch.cuda.is_available() for device/device_type

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -261,8 +261,8 @@ def gen_local_llm(model_id="TheBloke/vicuna-7B-1.1-HF"):
261
  model = LlamaForCausalLM.from_pretrained(
262
  model_id,
263
  # load_in_8bit=True, # set these options if your GPU supports them!
264
- # device_map=1#'auto',
265
- # torch_dtype=torch.float16,
266
  low_cpu_mem_usage=True
267
  )
268
  else:
 
261
  model = LlamaForCausalLM.from_pretrained(
262
  model_id,
263
  # load_in_8bit=True, # set these options if your GPU supports them!
264
+ device_map="auto",
265
+ torch_dtype=torch.float16,
266
  low_cpu_mem_usage=True
267
  )
268
  else: