Fix the multi-GPU inference bug
Browse files
modeling_internlm_xcomposer2.py
CHANGED
@@ -185,7 +185,7 @@ class InternLMXComposer2ForCausalLM(InternLM2PreTrainedModel):
|
|
185 |
wrap_im_mask.append(torch.zeros(part_embeds.shape[:2]))
|
186 |
temp_len += part_embeds.shape[1]
|
187 |
if idx < image_nums:
|
188 |
-
wrap_embeds.append(image[idx].unsqueeze(0))
|
189 |
wrap_im_mask.append(torch.ones(1, image[idx].shape[0]))
|
190 |
temp_len += im_len
|
191 |
|
|
|
185 |
wrap_im_mask.append(torch.zeros(part_embeds.shape[:2]))
|
186 |
temp_len += part_embeds.shape[1]
|
187 |
if idx < image_nums:
|
188 |
+
wrap_embeds.append(image[idx].unsqueeze(0).to(self.device))
|
189 |
wrap_im_mask.append(torch.ones(1, image[idx].shape[0]))
|
190 |
temp_len += im_len
|
191 |
|