quincyqiang committed on
Commit
37449e4
1 Parent(s): a064252

Update app.py

Files changed (1): app.py +2 -2
app.py CHANGED
@@ -25,8 +25,8 @@ max_new_tokens = 2048
  print(f"Starting to load the model {model_name} into memory")
 
  tok = AutoTokenizer.from_pretrained(model_name)
-#m = AutoModelForCausalLM.from_pretrained(model_name).eval()
-m = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype="auto", device_map="auto")
+m = AutoModelForCausalLM.from_pretrained(model_name).eval()
+#m = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype="auto", device_map="auto")
  print("m=====>device",m.device)
  # tok.convert_tokens_to_ids(["<|im_end|>", "<|endoftext|>"])
  stop_token_ids = [tok.eos_token_id]
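
For context, a minimal sketch of the loading path this commit switches to, assuming transformers is installed; model_name below is a placeholder, since the real value is defined earlier in app.py, outside this hunk. Dropping device_map="auto" and torch_dtype="auto" loads the weights with default dtype on the default device (no accelerate-based dispatch), and .eval() puts the model in inference mode.

# Sketch of the post-commit loading path; model_name is a placeholder here.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "gpt2"  # placeholder; app.py defines its own model_name above this hunk

print(f"Starting to load the model {model_name} into memory")

tok = AutoTokenizer.from_pretrained(model_name)

# Active after this commit: default dtype and device, then switch to inference mode.
m = AutoModelForCausalLM.from_pretrained(model_name).eval()

# Commented out by this commit: automatic dtype selection and device placement.
# m = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype="auto", device_map="auto")

print("m=====>device", m.device)
stop_token_ids = [tok.eos_token_id]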