dinhquangson commited on
Commit
2c9cfaf
1 Parent(s): 28f141a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -3,18 +3,18 @@ from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer
3
 
4
  model_path = "vinai/PhoGPT-7B5-Instruct"
5
 
6
- config = AutoConfig.from_pretrained(model_path, trust_remote_code=True)
7
  config.init_device = "cuda"
8
  # config.attn_config['attn_impl'] = 'triton' # Enable if "triton" installed!
9
 
10
  model = AutoModelForCausalLM.from_pretrained(
11
- model_path, config=config, torch_dtype=torch.bfloat16, trust_remote_code=True
12
  )
13
  # If your GPU does not support bfloat16:
14
  # model = AutoModelForCausalLM.from_pretrained(model_path, config=config, torch_dtype=torch.float16, trust_remote_code=True)
15
  model.eval()
16
 
17
- tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
18
 
19
 
20
  def answer(input_prompt):
 
3
 
4
  model_path = "vinai/PhoGPT-7B5-Instruct"
5
 
6
+ config = AutoConfig.from_pretrained(model_path, trust_remote_code=True, token=True)  # SECURITY: a live `hf_...` token was hardcoded here and is now in public git history — revoke it; `token=True` uses the cached `huggingface-cli login` credential instead
7
  config.init_device = "cuda"
8
  # config.attn_config['attn_impl'] = 'triton' # Enable if "triton" installed!
9
 
10
  model = AutoModelForCausalLM.from_pretrained(
11
+ model_path, config=config, torch_dtype=torch.bfloat16, trust_remote_code=True, token=True  # SECURITY: hardcoded API token removed — revoke the leaked token and authenticate via cached login / HF_TOKEN env var
12
  )
13
  # If your GPU does not support bfloat16:
14
  # model = AutoModelForCausalLM.from_pretrained(model_path, config=config, torch_dtype=torch.float16, trust_remote_code=True)
15
  model.eval()
16
 
17
+ tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True, token=True)  # SECURITY: hardcoded API token removed — revoke the leaked token; never commit credentials
18
 
19
 
20
  def answer(input_prompt):