max_new_tokens error despite setting a lower size: The size of tensor a (8192) must match the size of tensor b (8193) at non-singleton dimension 2

#12 opened by LaZy3138

code:

from transformers import pipeline  # logger is configured elsewhere in llm_test.py

# Build the StarChat prompt around the user question and log it.
inputs = "<|system|>\n<|end|>\n<|user|>%s<|end|>\n<|assistant|>" % question
logger.info(f"input is:\n{inputs}")
pipe = pipeline("text-generation", model="/root_fs/home/tonyaw/machine_learning/nlp/huggingface.co/starchat-alpha")
outputs = pipe(inputs, max_new_tokens=8000)
logger.info(f"output=\n{outputs}")

error:

Unhandled Exception
Traceback (most recent call last):
  File "./llm_test.py", line 126, in
    starchat_test()
  File "./llm_test.py", line 117, in starchat_test
    outputs = pipe(inputs, max_new_tokens=8000)
  File "/usr/local/lib/python3.8/dist-packages/transformers/pipelines/text_generation.py", line 209, in call
    return super().call(text_inputs, **kwargs)
  File "/usr/local/lib/python3.8/dist-packages/transformers/pipelines/base.py", line 1109, in call
    return self.run_single(inputs, preprocess_params, forward_params, postprocess_params)
  File "/usr/local/lib/python3.8/dist-packages/transformers/pipelines/base.py", line 1116, in run_single
    model_outputs = self.forward(model_inputs, **forward_params)
  File "/usr/local/lib/python3.8/dist-packages/transformers/pipelines/base.py", line 1015, in forward
    model_outputs = self._forward(model_inputs, **forward_params)
  File "/usr/local/lib/python3.8/dist-packages/transformers/pipelines/text_generation.py", line 251, in _forward
    generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
  File "/usr/local/lib/python3.8/dist-packages/torch/utils/_contextlib.py", line 115, in decorate_context
    return func(*args, **kwargs)
  File "/usr/local/lib/python3.8/dist-packages/transformers/generation/utils.py", line 1437, in generate
    return self.greedy_search(
  File "/usr/local/lib/python3.8/dist-packages/transformers/generation/utils.py", line 2248, in greedy_search
    outputs = self(
  File "/usr/local/lib/python3.8/dist-packages/torch/nn/modules/module.py", line 1501, in _call_impl
    return forward_call(*args, **kwargs)
  File "/usr/local/lib/python3.8/dist-packages/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py", line 808, in forward
    transformer_outputs = self.transformer(
  File "/usr/local/lib/python3.8/dist-packages/torch/nn/modules/module.py", line 1501, in _call_impl
    return forward_call(*args, **kwargs)
  File "/usr/local/lib/python3.8/dist-packages/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py", line 605, in forward
    self_attention_mask = self_attention_mask * attention_mask.view(batch_size, 1, -1).to(
RuntimeError: The size of tensor a (8192) must match the size of tensor b (8193) at non-singleton dimension 2
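Likely cause, reading the numbers in the error: StarChat Alpha (GPTBigCode) has an 8192-token context, and the prompt above already uses some tokens, so with max_new_tokens=8000 the running sequence can grow past 8192 during generation; the mask multiplication in modeling_gpt_bigcode.py fails as soon as the length reaches 8193. A minimal workaround sketch, continuing from the snippets above, is to cap max_new_tokens so prompt plus generation stays within the context window:

# prompt_tokens comes from the token-count sketch above; 8192 is the model's
# maximum sequence length, taken from the error message itself.
outputs = pipe(inputs, max_new_tokens=8192 - prompt_tokens)
logger.info(f"output=\n{outputs}")

(If the prompt were ever longer than 8192 tokens on its own, the subtraction would go negative and the input would need to be truncated first.)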
