Activate use_cache to speed up inference

#23
Opened by loubnabnl (HF staff)
Files changed (1)
  1. config.json +1 -1
config.json CHANGED
@@ -46,7 +46,7 @@
46
  "tokenizer_name": "replit/replit-code-v1-3b",
47
  "torch_dtype": "float32",
48
  "transformers_version": "4.28.1",
49
- "use_cache": false,
50
  "verbose": 0,
51
  "vocab_size": 32768
52
  }
 
46
  "tokenizer_name": "replit/replit-code-v1-3b",
47
  "torch_dtype": "float32",
48
  "transformers_version": "4.28.1",
49
+ "use_cache": true,
50
  "verbose": 0,
51
  "vocab_size": 32768
52
  }