import gc
import paddle
import unittest
import numpy as np
from PIL import Image
from ppdiffusers.utils import slow
from ppdiffusers.utils.testing_utils import require_paddle_gpu
from ppdiffusers import StableDiffusionPipeline, DDIMScheduler

import ptp
import null_text_ptp


class NullTextInversionFastTests(unittest.TestCase):
    """Fast prompt-to-prompt editing tests: replace / refine / reweight edits
    starting from a synthesized latent (no real-image inversion step).

    Generated images are compared pixel-wise against reference images stored
    under ./example_images/.
    """

    pretrained_model_name_or_path = "CompVis/stable-diffusion-v1-4"
    LOW_RESOURCE = False
    NUM_DIFFUSION_STEPS = 50
    GUIDANCE_SCALE = 7.5
    MAX_NUM_WORDS = 77
    seed = 8888
    generator = paddle.Generator().manual_seed(seed) if seed else None
    # NOTE: pipeline is loaded once at class-definition time (import time) and
    # shared by all tests; this keeps per-test cost low but makes importing this
    # module heavyweight.
    device = paddle.device.set_device('gpu:0') if paddle.device.is_compiled_with_cuda() else paddle.device.set_device('cpu')
    model = StableDiffusionPipeline.from_pretrained(pretrained_model_name_or_path).to(device)

    def get_dummy_inputs(self, prompts):
        """Run an initial diffusion pass on ``prompts[:1]`` to obtain a starting
        latent ``x_t`` shared by the edited generations.

        Returns:
            ((x_t, None), generic_param_dict, diffusion_param_dict) where the
            two dicts hold the keyword arguments common to ``ptp.make_controller``
            and ``ptp.run_and_display`` respectively. The second tuple element
            is ``None`` because the fast tests use no optimized null-text
            (unconditional) embeddings.
        """
        generic_param_dict = {
            "prompts": prompts,
            "num_steps": self.NUM_DIFFUSION_STEPS,
            "low_resource": self.LOW_RESOURCE,
        }
        diffusion_param_dict = {
            "ldm_stable": self.model,
            "run_baseline": False,
            "generator": self.generator,
            "verbose": True,
        }
        controller = ptp.AttentionStore(self.LOW_RESOURCE)
        _, x_t = ptp.run_and_display(prompts[:1],
                                     self.model,
                                     controller,
                                     self.NUM_DIFFUSION_STEPS,
                                     self.GUIDANCE_SCALE,
                                     latent=None,
                                     run_baseline=False,
                                     generator=self.generator,
                                     uncond_embeddings=None,
                                     low_resource=self.LOW_RESOURCE,
                                     verbose=True)
        return (x_t, None), generic_param_dict, diffusion_param_dict

    def _run_edit_and_check(self, prompts, controller_param_dict, expected_image_path):
        """Run a prompt-to-prompt edit and assert the output matches the reference.

        Previously each test *returned* the boolean comparison, which unittest
        silently discards, so the tests could never fail. We assert instead.
        """
        (x_t, uncond_embeddings), generic_param_dict, diffusion_param_dict = self.get_dummy_inputs(prompts)
        controller = ptp.make_controller(tokenizer=self.model.tokenizer,
                                         max_num_words=self.MAX_NUM_WORDS,
                                         **controller_param_dict,
                                         **generic_param_dict)
        images, _ = ptp.run_and_display(controller=controller,
                                        latent=x_t,
                                        uncond_embeddings=uncond_embeddings,
                                        **diffusion_param_dict,
                                        **generic_param_dict)
        expected = Image.open(expected_image_path)
        # NOTE(review): if both arrays are uint8 (0-255), a tolerance of 0.05
        # effectively demands exact equality — confirm this is intended.
        max_diff = np.abs(np.array(images).flatten() - np.array(expected).flatten()).max()
        self.assertLess(max_diff, 0.05)

    def test_null_text_inversion_replace_edit(self):
        """Word-replacement edit: squirrel -> lion."""
        controller_param_dict = {
            "cross_replace_steps": {"default_": 1., "lion": .4},
            "self_replace_steps": 0.4,
            "blend_words": None,
            "equilizer_params": None,
            "is_replace_controller": True,
        }
        prompts = ["A painting of a squirrel eating a burger", "A painting of a lion eating a burger"]
        self._run_edit_and_check(prompts, controller_param_dict, "./example_images/ptp_replace.png")

    def test_null_text_inversion_refine_edit(self):
        """Prompt-refinement edit: append 'at winter'."""
        controller_param_dict = {
            "cross_replace_steps": {"default_": 0.8},
            "self_replace_steps": 0.4,
            "blend_words": None,
            "equilizer_params": None,
            "is_replace_controller": False,
        }
        prompts = ["a photo of a house on a mountain", "a photo of a house on a mountain at winter"]
        self._run_edit_and_check(prompts, controller_param_dict, "./example_images/ptp_refine.png")

    def test_null_text_inversion_reweight_edit(self):
        """Attention-reweighting edit: amplify the word 'smiling'."""
        controller_param_dict = {
            "cross_replace_steps": {"default_": 0.8},
            "self_replace_steps": 0.4,
            "blend_words": None,
            "equilizer_params": {"words": ["smiling"], "values": [10]},
            "is_replace_controller": True,
        }
        prompts = ["a smiling bunny doll", "a smiling bunny doll"]
        self._run_edit_and_check(prompts, controller_param_dict, "./example_images/ptp_reweight.png")



@slow
@require_paddle_gpu
class NullTextInversionSlowTests(unittest.TestCase):
    """Slow tests: invert a real image with null-text optimization, then apply
    replace/refine edits with local blending and attention reweighting.

    Generated images are compared pixel-wise against reference images stored
    under ./example_images/.
    """

    pretrained_model_name_or_path = "CompVis/stable-diffusion-v1-4"
    LOW_RESOURCE = False
    NUM_DIFFUSION_STEPS = 50
    GUIDANCE_SCALE = 7.5
    MAX_NUM_WORDS = 77
    seed = 8888
    generator = paddle.Generator().manual_seed(seed) if seed else None
    device = paddle.device.set_device('gpu:0') if paddle.device.is_compiled_with_cuda() else paddle.device.set_device('cpu')
    # DDIM with deterministic settings so the inversion is reproducible.
    scheduler = DDIMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, steps_offset=1)
    model = StableDiffusionPipeline.from_pretrained(pretrained_model_name_or_path, scheduler=scheduler).to(device)
    image_path = "./example_images/gnochi_mirror.jpeg"

    def tearDown(self):
        # Free GPU memory between tests; each test runs a full inversion.
        super().tearDown()
        gc.collect()
        paddle.device.cuda.empty_cache()

    def get_dummy_inputs(self, prompts):
        """Invert ``self.image_path`` against ``prompts[0]`` via null-text
        optimization, then reconstruct once to warm the attention store.

        Returns:
            ((x_t, uncond_embeddings), generic_param_dict, diffusion_param_dict)
            where ``x_t`` is the inverted starting latent and
            ``uncond_embeddings`` the per-step optimized null-text embeddings.
        """
        generic_param_dict = {
            "prompts": prompts,
            "num_steps": self.NUM_DIFFUSION_STEPS,
            "low_resource": self.LOW_RESOURCE,
        }
        diffusion_param_dict = {
            "ldm_stable": self.model,
            "run_baseline": False,
            "generator": self.generator,
            "verbose": True,
        }
        null_inversion = null_text_ptp.NullInversion(self.model, self.NUM_DIFFUSION_STEPS, self.GUIDANCE_SCALE)
        _, x_t, uncond_embeddings = null_inversion.invert(self.image_path, prompts[0], offsets=(0, 0, 200, 0), verbose=True)
        controller = ptp.AttentionStore(self.LOW_RESOURCE)
        _, x_t = ptp.run_and_display(prompts[:1],
                                     self.model,
                                     controller,
                                     self.NUM_DIFFUSION_STEPS,
                                     self.GUIDANCE_SCALE,
                                     latent=x_t,
                                     run_baseline=False,
                                     generator=self.generator,
                                     uncond_embeddings=uncond_embeddings,
                                     low_resource=self.LOW_RESOURCE,
                                     verbose=False)
        return (x_t, uncond_embeddings), generic_param_dict, diffusion_param_dict

    def _run_edit_and_check(self, prompts, controller_param_dict, expected_image_path):
        """Run a prompt-to-prompt edit and assert the output matches the reference.

        Previously each test *returned* the boolean comparison, which unittest
        silently discards, so the tests could never fail. We assert instead.
        """
        (x_t, uncond_embeddings), generic_param_dict, diffusion_param_dict = self.get_dummy_inputs(prompts)
        controller = ptp.make_controller(tokenizer=self.model.tokenizer,
                                         max_num_words=self.MAX_NUM_WORDS,
                                         **controller_param_dict,
                                         **generic_param_dict)
        images, _ = ptp.run_and_display(controller=controller,
                                        latent=x_t,
                                        uncond_embeddings=uncond_embeddings,
                                        **diffusion_param_dict,
                                        **generic_param_dict)
        expected = Image.open(expected_image_path)
        # NOTE(review): if both arrays are uint8 (0-255), a tolerance of 0.05
        # effectively demands exact equality — confirm this is intended.
        max_diff = np.abs(np.array(images).flatten() - np.array(expected).flatten()).max()
        self.assertLess(max_diff, 0.05)

    def test_null_text_inversion_replace_reweight_local_edit(self):
        """Replace cat -> tiger, amplify 'tiger', blend locally around the word."""
        controller_param_dict = {
            "cross_replace_steps": {"default_": 0.8},
            "self_replace_steps": 0.5,
            "blend_words": [["cat"], ["tiger"]],
            "equilizer_params": {"words": ["tiger"], "values": [2]},
            "is_replace_controller": True,
        }
        prompts = ["a cat sitting next to a mirror", "a tiger sitting next to a mirror"]
        self._run_edit_and_check(prompts, controller_param_dict, "./example_images/gnochi_mirror_null_text_replace.png")

    def test_null_text_inversion_refine_reweight_local_edit(self):
        """Refine to 'silver cat sculpture', amplify the new words, blend locally."""
        controller_param_dict = {
            "cross_replace_steps": {"default_": 0.8},
            "self_replace_steps": 0.6,
            "blend_words": [["cat"], ["cat"]],
            "equilizer_params": {"words": ["silver", "sculpture"], "values": [2, 2]},
            "is_replace_controller": False,
        }
        prompts = ["a cat sitting next to a mirror", "a silver cat sculpture sitting next to a mirror"]
        self._run_edit_and_check(prompts, controller_param_dict, "./example_images/gnochi_mirror_null_text_refine.png")


# Run the test suite when this file is executed directly.
if __name__ == "__main__":
    unittest.main()