Failed to fine-tune chatglm-6b-int4 on a Mac M2

#10
by xiao111 - opened
  1. Trying to fine-tune chatglm-6b-int4 on an M2 fails. It looks like kernels == None: the import `from cpm_kernels.kernels.base import LazyKernelCModule, KernelFunction, round_up` raises an exception;
Traceback (most recent call last):
  File "/Users/diaojunxian/Documents/agi/ChatGLM-6B/ptuning/main.py", line 433, in <module>
    main()
  File "/Users/diaojunxian/Documents/agi/ChatGLM-6B/ptuning/main.py", line 372, in main
    train_result = trainer.train(resume_from_checkpoint=checkpoint)
                   ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/diaojunxian/Documents/agi/ChatGLM-6B/ptuning/trainer.py", line 1635, in train
    return inner_training_loop(
           ^^^^^^^^^^^^^^^^^^^^
  File "/Users/diaojunxian/Documents/agi/ChatGLM-6B/ptuning/trainer.py", line 1904, in _inner_training_loop
    tr_loss_step = self.training_step(model, inputs)
                   ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/diaojunxian/Documents/agi/ChatGLM-6B/ptuning/trainer.py", line 2647, in training_step
    loss = self.compute_loss(model, inputs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/diaojunxian/Documents/agi/ChatGLM-6B/ptuning/trainer.py", line 2679, in compute_loss
    outputs = model(**inputs)
              ^^^^^^^^^^^^^^^
  File "/Users/diaojunxian/anaconda3/envs/py3.11/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1502, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/diaojunxian/anaconda3/envs/py3.11/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1511, in _call_impl
    return forward_call(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/diaojunxian/.cache/huggingface/modules/transformers_modules/chatglm-6b-int4/modeling_chatglm.py", line 1190, in forward
    transformer_outputs = self.transformer(
                          ^^^^^^^^^^^^^^^^^
  File "/Users/diaojunxian/anaconda3/envs/py3.11/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1502, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/diaojunxian/anaconda3/envs/py3.11/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1511, in _call_impl
    return forward_call(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/diaojunxian/.cache/huggingface/modules/transformers_modules/chatglm-6b-int4/modeling_chatglm.py", line 985, in forward
    layer_ret = torch.utils.checkpoint.checkpoint(
                ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/diaojunxian/anaconda3/envs/py3.11/lib/python3.11/site-packages/torch/utils/checkpoint.py", line 356, in checkpoint
    return CheckpointFunction.apply(function, preserve, *args)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/diaojunxian/anaconda3/envs/py3.11/lib/python3.11/site-packages/torch/autograd/function.py", line 506, in apply
    return super().apply(*args, **kwargs)  # type: ignore[misc]
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/diaojunxian/anaconda3/envs/py3.11/lib/python3.11/site-packages/torch/utils/checkpoint.py", line 175, in forward
    outputs = run_function(*args)
              ^^^^^^^^^^^^^^^^^^^
  File "/Users/diaojunxian/anaconda3/envs/py3.11/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1502, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/diaojunxian/anaconda3/envs/py3.11/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1511, in _call_impl
    return forward_call(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/diaojunxian/.cache/huggingface/modules/transformers_modules/chatglm-6b-int4/modeling_chatglm.py", line 627, in forward
    attention_outputs = self.attention(
                        ^^^^^^^^^^^^^^^
  File "/Users/diaojunxian/anaconda3/envs/py3.11/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1502, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/diaojunxian/anaconda3/envs/py3.11/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1511, in _call_impl
    return forward_call(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/diaojunxian/.cache/huggingface/modules/transformers_modules/chatglm-6b-int4/modeling_chatglm.py", line 445, in forward
    mixed_raw_layer = self.query_key_value(hidden_states)
                      ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/diaojunxian/anaconda3/envs/py3.11/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1502, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/diaojunxian/anaconda3/envs/py3.11/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1511, in _call_impl
    return forward_call(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/diaojunxian/.cache/huggingface/modules/transformers_modules/chatglm-6b-int4/quantization.py", line 391, in forward
    output = W8A16Linear.apply(input, self.weight, self.weight_scale, self.weight_bit_width)
             ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/diaojunxian/anaconda3/envs/py3.11/lib/python3.11/site-packages/torch/autograd/function.py", line 506, in apply
    return super().apply(*args, **kwargs)  # type: ignore[misc]
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/diaojunxian/.cache/huggingface/modules/transformers_modules/chatglm-6b-int4/quantization.py", line 56, in forward
    weight = extract_weight_to_half(quant_w, scale_w, weight_bit_width)
             ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/diaojunxian/.cache/huggingface/modules/transformers_modules/chatglm-6b-int4/quantization.py", line 274, in extract_weight_to_half
    func = kernels.int4WeightExtractionHalf
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
AttributeError: 'NoneType' object has no attribute 'int4WeightExtractionHalf'
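
As far as I can tell, the quantized model's quantization.py relies on the CUDA kernels from cpm_kernels; on Apple Silicon there is no CUDA device, so those kernels are never initialized and the module-level `kernels` object stays `None`, which is exactly what the `AttributeError` reflects. A minimal diagnostic sketch (my own check, not part of the repo) to confirm the environment on an M1/M2:

```python
import torch

# On M1/M2 there is no CUDA device, so the int4 CUDA kernels from cpm_kernels
# cannot be loaded and `kernels` in quantization.py is left as None.
print("CUDA available:", torch.cuda.is_available())          # False on Apple Silicon
print("MPS available :", torch.backends.mps.is_available())  # True on M1/M2

try:
    # The import mentioned in the original post; it typically fails (or the
    # kernels fail to initialize) when the CUDA toolkit is absent.
    from cpm_kernels.kernels.base import LazyKernelCModule, KernelFunction, round_up
    print("cpm_kernels imported successfully")
except Exception as exc:
    print("cpm_kernels unavailable:", exc)
```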

On an M1 with python=3.8.16 I hit the same problem, except it happens during inference. Elsewhere I've seen it blamed on insufficient NVIDIA GPU compute capability, but I'm using MPS; could it really be a compute-capability issue as well?

No, it isn't. It's a setup issue: the code ends up taking the CUDA else branch. Try training on CPU and see;
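
For reference, a minimal sketch of loading the model for CPU-only use (the `.float()` cast follows the ChatGLM-6B README's CPU instructions; the path is a placeholder, adapt it and the ptuning scripts to your setup):

```python
from transformers import AutoModel, AutoTokenizer

model_path = "THUDM/chatglm-6b-int4"  # or a local copy of the int4 checkpoint

tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
# Load in float32 and keep everything on CPU; do not call .cuda() or .to("mps"),
# otherwise the code paths that require the CUDA int4 kernels are taken.
model = AutoModel.from_pretrained(model_path, trust_remote_code=True).float()
model = model.to("cpu")
```

When launching ptuning/main.py, passing the standard transformers training argument `--no_cuda` should likewise keep the Trainer on CPU.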
