gururise committed on
Commit
30a53de
1 Parent(s): 6008249

update gpu ram size for t4

Browse files
Files changed (1) hide show
  1. config.py +1 -1
config.py CHANGED
@@ -6,7 +6,7 @@ quantized = {
6
  "runtimedtype": torch.bfloat16,
7
  "useGPU": torch.cuda.is_available(),
8
  "chunksize": 32, # larger = more accurate, but more memory (and slower)
9
- "target": 24 # your gpu max size, excess vram offloaded to cpu
10
  }
11
 
12
  # UNCOMMENT TO SELECT OPTIONS
 
6
  "runtimedtype": torch.bfloat16,
7
  "useGPU": torch.cuda.is_available(),
8
  "chunksize": 32, # larger = more accurate, but more memory (and slower)
9
+ "target": 15 # your gpu max size, excess vram offloaded to cpu
10
  }
11
 
12
  # UNCOMMENT TO SELECT OPTIONS