SrikanthChellappa committed
Commit 45a0b45 (1 parent: 9ed52b0)

Update app.py

Files changed (1)
  1. app.py +2 -2
app.py CHANGED
@@ -31,14 +31,14 @@ bnb_config = BitsAndBytesConfig(
     bnb_4bit_compute_dtype=torch.bfloat16,
 )
 #bnb_4bit_compute_dtype=torch.float16,
-tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True)
+tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True, token="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")
 tokenizer.pad_token = tokenizer.eos_token
 model = AutoModelForCausalLM.from_pretrained(
     model_name,
     quantization_config=bnb_config,
     device_map="auto",
     trust_remote_code=True,
-    token="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"
+    token="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"
 )
 #model = AutoModelForCausalLM.from_pretrained('./mistral_model_7B_8bit_Q/', quantization_config=quantization_config)
 generation_config = GenerationConfig(max_new_tokens=1024, pad_token_id = tokenizer.eos_token_id, \
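
For context, a minimal self-contained sketch of the load path this commit touches. It is not the actual app.py: model_name here is a placeholder (the real value is defined earlier in the file and is not part of this diff), load_in_4bit is assumed from the bnb_4bit_compute_dtype setting, and the token is read from an HF_TOKEN environment variable instead of the hardcoded string shown in the diff.

# Sketch only; placeholder names are marked in the comments.
import os

import torch
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    BitsAndBytesConfig,
    GenerationConfig,
)

model_name = "some-org/some-model"  # placeholder, not the model used by app.py
hf_token = os.environ.get("HF_TOKEN")  # assumed env var; stands in for the hardcoded token string

# 4-bit quantization config; the bfloat16 compute dtype matches the diff,
# load_in_4bit is an assumption.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
)

tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True, token=hf_token)
tokenizer.pad_token = tokenizer.eos_token  # reuse EOS as the padding token

model = AutoModelForCausalLM.from_pretrained(
    model_name,
    quantization_config=bnb_config,
    device_map="auto",
    trust_remote_code=True,
    token=hf_token,
)

generation_config = GenerationConfig(
    max_new_tokens=1024,
    pad_token_id=tokenizer.eos_token_id,
    # further generation arguments continue in app.py
)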