nxphi47 committed on
Commit
1d9f225
1 Parent(s): ae91f33

Update multipurpose_chatbot/engines/transformers_engine.py

Browse files
multipurpose_chatbot/engines/transformers_engine.py CHANGED
@@ -546,7 +546,7 @@ class TransformersEngine(BaseEngine):
546
  raise gr.Error(message_safety)
547
 
548
  # @maybe_spaces_gpu
549
- def generate_yield_string(self, prompt, temperature, max_tokens, stop_strings: Optional[Tuple[str]] = None, **kwargs):
550
 
551
  # ! MUST PUT INSIDE torch.no_grad() otherwise it will overflow OOM
552
  import sys
@@ -564,7 +564,7 @@ class TransformersEngine(BaseEngine):
564
  inputs = self.tokenizer(prompt, return_tensors='pt')
565
  # whether to print the full prompts
566
  retok_full_prompt = self.tokenizer.decode(inputs.input_ids[0], skip_special_tokens=False)
567
- print(f"retok_full_prompt:\n{retok_full_prompt}>>>>")
568
  begin_bos = inputs.input_ids[0][0] == self.tokenizer.bos_token_id
569
  print(f'begin_bos: {begin_bos}')
570
 
 
546
  raise gr.Error(message_safety)
547
 
548
  # @maybe_spaces_gpu
549
+ def generate_yield_string(self, prompt, temperature=0.7, max_tokens=1024, stop_strings: Optional[Tuple[str]] = None, **kwargs):
550
 
551
  # ! MUST PUT INSIDE torch.no_grad() otherwise it will overflow OOM
552
  import sys
 
564
  inputs = self.tokenizer(prompt, return_tensors='pt')
565
  # whether to print the full prompts
566
  retok_full_prompt = self.tokenizer.decode(inputs.input_ids[0], skip_special_tokens=False)
567
+ # print(f"retok_full_prompt:\n{retok_full_prompt}>>>>")
568
  begin_bos = inputs.input_ids[0][0] == self.tokenizer.bos_token_id
569
  print(f'begin_bos: {begin_bos}')
570