import os
import torch
import torch_npu


import PIL
from PIL import Image

from diffusers import StableDiffusionPipeline
from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
from torch_npu.contrib import transfer_to_npu


# Fix: the torch_npu API keyword is "jit_compile", not "jit" — the
# misspelled keyword means JIT compilation was never actually disabled.
# Disabling it selects eager-style execution on the Ascend NPU.
torch.npu.set_compile_mode(jit_compile=False)

# All models/tensors are placed on the Ascend NPU device.
device = "npu"

def image_grid(imgs, rows, cols):
    """Paste equally-sized PIL images into a single rows x cols grid image.

    Args:
        imgs: list of PIL.Image objects, all the same size; must contain
            exactly ``rows * cols`` images.
        rows: number of grid rows.
        cols: number of grid columns.

    Returns:
        A new ``PIL.Image`` of size ``(cols * w, rows * h)`` holding the grid.

    Raises:
        AssertionError: if ``len(imgs) != rows * cols``.
    """
    assert len(imgs) == rows * cols

    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))

    # Fill left-to-right, top-to-bottom: column = i % cols, row = i // cols.
    # (Removed the unused grid_w/grid_h locals from the original.)
    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid

# Local paths to the base SD 1.5 checkpoint and the learned <cat-toy>
# textual-inversion embedding.
pretrained_model_name = "/stable-diffusion-v1-5"
repo_id = "/cat-toy"

prompt = "a grafitti in a favela wall with a <cat-toy> on it"

# Load the base pipeline and move it to the NPU.
pipeline = StableDiffusionPipeline.from_pretrained(pretrained_model_name).to(device)

# Register the textual-inversion embedding so the <cat-toy> token in the
# prompt resolves to the learned concept.
pipeline.load_textual_inversion(repo_id)

num_samples = 2  # images per pipeline call (grid columns below)
num_rows = 2     # number of pipeline calls (grid rows below)

all_images = []

for _ in range(num_rows):
    # Fix: the keyword was misspelled "gudiance_scale"; depending on the
    # diffusers version it was either silently ignored (CFG scale fell back
    # to the default) or raised a TypeError.
    images = pipeline(
        prompt,
        num_images_per_prompt=num_samples,
        num_inference_steps=50,
        guidance_scale=7.5,
    ).images
    all_images.extend(images)

# NOTE(review): arguments arrive as (rows=num_samples, cols=num_rows) —
# correct here only because both are 2; confirm the intended orientation
# before changing either count.
grid = image_grid(all_images, num_samples, num_rows)
grid.save("./grad.png")