Error on generate

#1 opened by devopen

Global seed set to 1686319981
Traceback (most recent call last):
File "/home/user/.pyenv/versions/3.8.9/lib/python3.8/site-packages/gradio/routes.py", line 374, in run_predict
output = await app.get_blocks().process_api(
File "/home/user/.pyenv/versions/3.8.9/lib/python3.8/site-packages/gradio/blocks.py", line 1017, in process_api
result = await self.call_function(
File "/home/user/.pyenv/versions/3.8.9/lib/python3.8/site-packages/gradio/blocks.py", line 835, in call_function
prediction = await anyio.to_thread.run_sync(
File "/home/user/.pyenv/versions/3.8.9/lib/python3.8/site-packages/anyio/to_thread.py", line 31, in run_sync
return await get_asynclib().run_sync_in_worker_thread(
File "/home/user/.pyenv/versions/3.8.9/lib/python3.8/site-packages/anyio/_backends/_asyncio.py", line 937, in run_sync_in_worker_thread
return await future
File "/home/user/.pyenv/versions/3.8.9/lib/python3.8/site-packages/anyio/_backends/_asyncio.py", line 867, in run
result = context.run(func, *args)
File "app.py", line 38, in process
cond = {"c_concat": [control], "c_crossattn": [model.get_learned_conditioning([prompt + ', ' + a_prompt] * num_samples)]}
File "/home/user/app/ldm/models/diffusion/ddpm.py", line 667, in get_learned_conditioning
c = self.cond_stage_model.encode(c)
File "/home/user/app/ldm/modules/encoders/modules.py", line 131, in encode
return self(text)
File "/home/user/.pyenv/versions/3.8.9/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1130, in _call_impl
return forward_call(*input, **kwargs)
File "/home/user/app/ldm/modules/encoders/modules.py", line 121, in forward
outputs = self.transformer(input_ids=tokens, output_hidden_states=self.layer=="hidden")
File "/home/user/.pyenv/versions/3.8.9/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1130, in _call_impl
return forward_call(*input, **kwargs)
File "/home/user/.pyenv/versions/3.8.9/lib/python3.8/site-packages/transformers/models/clip/modeling_clip.py", line 722, in forward
return self.text_model(
File "/home/user/.pyenv/versions/3.8.9/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1130, in _call_impl
return forward_call(*input, **kwargs)
File "/home/user/.pyenv/versions/3.8.9/lib/python3.8/site-packages/transformers/models/clip/modeling_clip.py", line 632, in forward
hidden_states = self.embeddings(input_ids=input_ids, position_ids=position_ids)
File "/home/user/.pyenv/versions/3.8.9/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1130, in _call_impl
return forward_call(*input, **kwargs)
File "/home/user/.pyenv/versions/3.8.9/lib/python3.8/site-packages/transformers/models/clip/modeling_clip.py", line 165, in forward
inputs_embeds = self.token_embedding(input_ids)
File "/home/user/.pyenv/versions/3.8.9/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1130, in _call_impl
return forward_call(*input, **kwargs)
File "/home/user/.pyenv/versions/3.8.9/lib/python3.8/site-packages/torch/nn/modules/sparse.py", line 158, in forward
return F.embedding(
File "/home/user/.pyenv/versions/3.8.9/lib/python3.8/site-packages/torch/nn/functional.py", line 2199, in embedding
return torch.embedding(weight, input, padding_idx, scale_grad_by_freq, sparse)
RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cpu and cuda:0! (when checking argument for argument index in method wrapper__index_select)
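For reference, the RuntimeError at the end means the CLIP token-embedding weights and the token ids ended up on different devices (cpu vs. cuda:0) by the time F.embedding runs. A minimal sketch of the usual fix, reusing the model, control, prompt, a_prompt and num_samples names visible in the app.py frame of the traceback (build_cond and the cond_stage_model.device check are illustrative assumptions, not the Space's actual code):

import torch

def build_cond(model, control, prompt, a_prompt, num_samples):
    # Hypothetical helper mirroring the failing code in app.py's process():
    # put the model (including its frozen CLIP text encoder) and every input
    # tensor on one device before assembling the conditioning dict.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model = model.to(device)      # moves submodules such as cond_stage_model too
    control = control.to(device)

    # The stock FrozenCLIPEmbedder moves its token ids with .to(self.device),
    # so that attribute (if present) has to match where the weights actually are.
    if hasattr(model.cond_stage_model, "device"):
        model.cond_stage_model.device = device

    return {
        "c_concat": [control],
        "c_crossattn": [model.get_learned_conditioning([prompt + ', ' + a_prompt] * num_samples)],
    }

On a CPU-only Space the same sketch simply keeps everything on cpu, which avoids the mismatch in the other direction.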

The error should be solved now. Please let me know if it works.

It did not work.

RamAnanth1 changed discussion status to closed
