rphrp1985 committed on
Commit d491d34
Parent: b91461d

Update app.py

Files changed (1)
  1. app.py +6 -6
app.py CHANGED
@@ -40,13 +40,13 @@ tokenizer = AutoTokenizer.from_pretrained(
     , token= token,)
 
 
-with init_empty_weights():
-    model = AutoModelForCausalLM.from_pretrained(model_id, token= token,
+
+model = AutoModelForCausalLM.from_pretrained(model_id, token= token,
     # torch_dtype= torch.uint8,
     torch_dtype=torch.float16,
     # torch_dtype=torch.fl,
     # attn_implementation="flash_attention_2",
-    low_cpu_mem_usage=True,
+    # low_cpu_mem_usage=True,
     # llm_int8_enable_fp32_cpu_offload=True,
     device_map='cuda',
 
@@ -56,7 +56,7 @@ with init_empty_weights():
 #
 
 
-device_map = infer_auto_device_map(model, max_memory={0: "79GB", "cpu":"65GB" })
+# device_map = infer_auto_device_map(model, max_memory={0: "79GB", "cpu":"65GB" })
 
 # Load the model with the inferred device map
 # model = load_checkpoint_and_dispatch(model, model_id, device_map=device_map, no_split_module_classes=["GPTJBlock"])
@@ -77,8 +77,8 @@ def respond(
     messages = [{"role": "user", "content": "Hello, how are you?"}]
     input_ids = tokenizer.apply_chat_template(messages, tokenize=True, add_generation_prompt=True, return_tensors="pt").to('cuda')
     ## <BOS_TOKEN><|START_OF_TURN_TOKEN|><|USER_TOKEN|>Hello, how are you?<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>
-    with autocast():
-        gen_tokens = model.generate(
+    # with autocast():
+    gen_tokens = model.generate(
     input_ids,
     max_new_tokens=100,
     # do_sample=True,
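
After this commit, the load path boils down to pulling the weights straight onto the GPU in half precision, with no init_empty_weights() staging. A minimal sketch of that path, using a placeholder model_id and token (app.py defines its own):

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "org/model"  # placeholder; app.py sets its own model_id
token = "hf_..."        # placeholder Hugging Face access token

tokenizer = AutoTokenizer.from_pretrained(model_id, token=token)

# Load weights directly onto the default CUDA device in fp16.
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    token=token,
    torch_dtype=torch.float16,
    device_map="cuda",
)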
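The dropped init_empty_weights() / infer_auto_device_map() lines belong to accelerate's big-model loading path, which the diff leaves behind as comments. A hedged sketch of how that path is usually wired together; the max_memory budget and no_split_module_classes value are copied from the diff, while checkpoint_dir is an illustrative local path:

from accelerate import (
    init_empty_weights,
    infer_auto_device_map,
    load_checkpoint_and_dispatch,
)
from transformers import AutoConfig, AutoModelForCausalLM

config = AutoConfig.from_pretrained(model_id, token=token)
with init_empty_weights():
    # Build the module tree on the meta device: no weight memory is allocated yet.
    model = AutoModelForCausalLM.from_config(config)

# Plan a split across GPU 0 and CPU within the stated memory budget.
device_map = infer_auto_device_map(model, max_memory={0: "79GB", "cpu": "65GB"})

# Stream the real weights from disk and place each layer per the plan.
model = load_checkpoint_and_dispatch(
    model,
    checkpoint_dir,  # illustrative path to the downloaded checkpoint
    device_map=device_map,
    no_split_module_classes=["GPTJBlock"],
)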
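On the generation side, the autocast() wrapper is commented out: with fp16 weights the forward pass already runs in half precision, so generate() is called directly. A sketch of the resulting call plus decoding, reusing the prompt from the diff:

messages = [{"role": "user", "content": "Hello, how are you?"}]
input_ids = tokenizer.apply_chat_template(
    messages, tokenize=True, add_generation_prompt=True, return_tensors="pt"
).to("cuda")

# No autocast context: the fp16 model computes in half precision as-is.
gen_tokens = model.generate(input_ids, max_new_tokens=100)
print(tokenizer.decode(gen_tokens[0], skip_special_tokens=True))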