from shap_e.diffusion.sample import sample_latents
from shap_e.diffusion.gaussian_diffusion import diffusion_from_config as diffusion_from_config_shape
from shap_e.models.download import load_model, load_config
from shap_e.util.notebooks import create_pan_cameras, decode_latent_images, gif_widget
from shap_e.util.notebooks import decode_latent_mesh
import torch

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Load the Shap-E transmitter and text-conditional models, plus the diffusion config.
xm = load_model('transmitter', device=device)
model = load_model('text300M', device=device)
# model.load_state_dict(torch.load('./load/shapE_finetuned_with_330kdata.pth', map_location=device)['model_state_dict'])
diffusion = diffusion_from_config_shape(load_config('diffusion'))

# Release the Shap-E objects for now (the load calls above also leave the weights cached locally).
del xm
del model
del diffusion

from diffusers import DDIMScheduler, DDPMScheduler, StableDiffusionPipeline

# Stable Diffusion 2.1 (base) pipeline in half precision, with the safety checker,
# feature extractor, and tokenizer disabled.
pipe_kwargs = {
    "tokenizer": None,
    "safety_checker": None,
    "feature_extractor": None,
    "requires_safety_checker": False,
    "torch_dtype": torch.float16,
}

pipe = StableDiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1-base",
    **pipe_kwargs,
)
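
# Illustrative follow-up (an assumption, not part of the original script): the otherwise
# unused DDIMScheduler import and the `device` variable suggest that the pipeline's
# default scheduler is later swapped for DDIM and the pipeline moved to the target
# device, roughly like this:
#
# pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
# pipe = pipe.to(device)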