iamkhadke committed on
Commit
bf0b168
•
1 Parent(s): b34d408

Update app.py

Files changed (1)
  1. app.py +1 -1
app.py CHANGED
@@ -17,7 +17,7 @@ quantization_config = BitsAndBytesConfig(llm_int8_enable_fp32_cpu_offload=True)
  tok = AutoTokenizer.from_pretrained("stabilityai/stablelm-tuned-alpha-3b", device_map="auto", load_in_8bit=True, torch_dtype=torch.float16 )
  m = AutoModelForCausalLM.from_pretrained("stabilityai/stablelm-tuned-alpha-3b", device_map= "auto", quantization_config=quantization_config,
  offload_folder="./")
- generator = pipeline('text-generation', model=m, tokenizer=tok, device=1)
+ # generator = pipeline('text-generation', model=m, tokenizer=tok, device=1)
  print(f"Sucessfully loaded the model to the memory")
 
  start_message = """<|SYSTEM|># StableAssistant
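
The commit comments out the pipeline(...) call, whose fixed device=1 typically conflicts with a model that has already been dispatched by accelerate via device_map="auto" with 8-bit loading. With the pipeline gone, the app presumably generates text from the model directly. Below is a minimal sketch of such direct generation, assuming the same checkpoint and loading arguments as in the diff; the prompt and sampling settings are illustrative only and not taken from app.py.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

# Same checkpoint as app.py: 8-bit weights with fp32 CPU offload.
quantization_config = BitsAndBytesConfig(load_in_8bit=True, llm_int8_enable_fp32_cpu_offload=True)
tok = AutoTokenizer.from_pretrained("stabilityai/stablelm-tuned-alpha-3b")
m = AutoModelForCausalLM.from_pretrained(
    "stabilityai/stablelm-tuned-alpha-3b",
    device_map="auto",
    quantization_config=quantization_config,
    offload_folder="./",
)

# Hypothetical prompt; the real app builds it from start_message and the chat history.
prompt = "<|SYSTEM|># StableAssistant\n<|USER|>Hello!<|ASSISTANT|>"
inputs = tok(prompt, return_tensors="pt").to(m.device)
with torch.no_grad():
    out = m.generate(**inputs, max_new_tokens=64, do_sample=True, temperature=0.7)
# Decode only the newly generated tokens, skipping the prompt.
print(tok.decode(out[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True))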