from diffusers import StableDiffusionPipeline, AutoPipelineForText2Image, EulerAncestralDiscreteScheduler, \
    EulerDiscreteScheduler, AutoencoderKL
from modelscope.pipelines import pipeline
import torch
from omegaconf import OmegaConf
from transformers import (CLIPTextConfig, CLIPTextModel, CLIPTokenizer)
from compel import Compel
import lpw_stable_diffusion as lpw

# https://medium.com/mlearning-ai/using-civitai-models-with-diffusers-package-45e0c475a67e --

def from_zero(weights, base_emb):
    """Scale each token embedding of *base_emb* by a per-token weight.

    Args:
        weights: nested list of per-token weights, e.g. ``[[1.0, 2.0, ...]]``;
            its flattened length must equal the token dimension of *base_emb*.
        base_emb: prompt embedding tensor of shape ``(batch, tokens, dim)``.

    Returns:
        A tensor of the same shape as *base_emb*, with every token's
        embedding vector multiplied by its corresponding weight.
    """
    per_token = torch.tensor(weights, dtype=base_emb.dtype, device=base_emb.device)
    # Flatten to (tokens,) then broadcast as (1, tokens, 1) over the embedding dim.
    per_token = per_token.flatten()[None, :, None].expand_as(base_emb)
    return base_emb * per_token


def A1111_renorm(base_emb, weighted_emb):
    """Rescale *weighted_emb* so its overall mean matches that of *base_emb*.

    This mirrors the AUTOMATIC1111 web-UI behavior of renormalizing
    weighted prompt embeddings back to the unweighted embedding's scale.

    Args:
        base_emb: reference (unweighted) embedding tensor.
        weighted_emb: embedding tensor after per-token weighting.

    Returns:
        *weighted_emb* multiplied by the scalar ``base_emb.mean() / weighted_emb.mean()``.
    """
    scale = base_emb.mean() / weighted_emb.mean()
    return weighted_emb * scale


if __name__ == "__main__":

    # NOTE(review): clip_skip is only referenced in commented-out code below,
    # so it currently has no effect on the pipeline — confirm intent.
    clip_skip = 1
    cur_device = 'cuda'

    # Select how the pipeline is constructed: from a raw .safetensors checkpoint
    # or from a previously dumped diffusers-format directory.
    load_pipe_model = 'from_single_file'
    # load_pipe_model = 'from_pretrained'
    pipe = None
    if load_pipe_model == 'from_single_file':
        base_model_path = 'E:/data/ai/sd-model/majicmixRealistic_v7.safetensors'
        vae_model_path = 'D:/deeplab/sd-webui-aki/sd-webui-aki-v4.1/models/VAE/vae-ft-mse-840000-ema-pruned.safetensors'
        # NOTE(review): cache_dir is defined but only used in a commented-out kwarg.
        cache_dir = 'E:/data/ai/lib'
        config_file = 'E:/data/ai/config/v1-inference.yaml'
        print(base_model_path)
        # config = CLIPTextConfig.from_pretrained("E:/data/ai/lib/clip-vit-large-patch14",
        #                                         local_files_only=True,
        #                                         num_hidden_layers=12 - (clip_skip - 1))
        # Build the text-encoder config from a local clip-vit-large-patch14 copy.
        config = CLIPTextConfig.from_pretrained("E:/data/ai/lib/clip-vit-large-patch14",
                                                local_files_only=True)
        # NOTE(review): CLIPTextModel(config) constructs a model with RANDOMLY
        # INITIALIZED weights (no checkpoint is loaded here). It is then passed
        # to from_single_file below, which may use it as-is instead of the
        # checkpoint's text encoder — verify the encoder weights actually get
        # populated, otherwise prompt conditioning comes from random weights.
        text_model = CLIPTextModel(config)
        tokenizer = CLIPTokenizer.from_pretrained("E:/data/ai/lib/clip-vit-large-patch14", local_files_only=True)
        # Load a standalone VAE (vae-ft-mse-840000) to replace the checkpoint's VAE.
        vae = AutoencoderKL.from_single_file(vae_model_path, config_file=config_file, use_safetensors=True,
                                             local_files_only=True)
        print('load text_model end')

        vae.to(cur_device)
        text_model.to(cur_device)
        # Assemble the full pipeline from the single-file checkpoint, overriding
        # text encoder, tokenizer and VAE with the components built above.
        # NOTE(review): 'from_safetensor=True' does not look like a documented
        # diffusers kwarg (cf. 'use_safetensors') — likely a typo; confirm it
        # isn't silently ignored.
        pipe = StableDiffusionPipeline.from_single_file(base_model_path, safety_checker=None, use_safetensors=True,
                                                        local_files_only=True,
                                                        # cache_dir=cache_dir,
                                                        torch_dtype=torch.float32,
                                                        original_config_file=config_file,
                                                        from_safetensor=True, device='cuda', load_safety_checker=False,
                                                        text_encoder=text_model,
                                                        tokenizer=tokenizer,
                                                        vae=vae
                                                        )
    elif load_pipe_model == 'from_pretrained':
        pretrained_path = 'E:/data/ai/safetensors-dump/majicmixRealistic_v7'
        pipe = StableDiffusionPipeline.from_pretrained(pretrained_path)
    print('load model end')
    # pipe.save_pretrained("E:/data/ai/safetensors-dump/majicmixRealistic_v7");
    # lora_path = 'E:/data/ai/convert/persion1.safetensors'
    # NOTE(review): lora_path is assigned but every code path that would load
    # the LoRA is commented out below, so it is currently unused.
    lora_path = 'D:/deeplab/sd-webui-aki/sd-webui-aki-v4.1/models/Lora/FilmVelvia3.safetensors'
    # pipe.load_lora_weights(lora_path, adapter_name="FilmVelvia3")
    # pipe.fuse_lora(fuse_unet=True, fuse_text_encoder=True, lora_scale=0.6)
    # pipe.set_adapters(["FilmVelvia3"], adapter_weights=[0.3])
    # lora_w = 0.6
    # pipe._lora_scale = lora_w
    #
    # state_dict, network_alphas = pipe.lora_state_dict(
    #     lora_path
    # )
    #
    # for key in network_alphas:
    #     network_alphas[key] = network_alphas[key] * lora_w
    #
    # pipe.load_lora_into_unet(
    #     state_dict=state_dict
    #     , network_alphas=network_alphas
    #     , unet=pipe.unet
    # )

    # pipe.load_lora_into_text_encoder(
    #     state_dict=state_dict
    #     , network_alphas=network_alphas
    #     , text_encoder=pipe.text_encoder
    # )

    pipe = pipe.to("cuda")
    print('send end')
    # Generation settings: prompt pair, output resolution, batch size and steps.
    pos_prompt = '1girl,hair with bangs,black long dress,orange background,'
    neg_prompt = ('(worst quality:2),(low quality:2),(normal quality:2),lowres,watermark,')
    width = 512
    height = 768
    batch_size = 1
    num_inference_steps = 30
    # pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
    # Swap in the Euler scheduler, keeping the pipeline's scheduler config.
    pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
    # Fixed seed for reproducible generations.
    generator = torch.Generator(device="cuda").manual_seed(4216575493)

    # Build weighted prompt embeddings with the long-prompt-weighting helper,
    # which honors the (token:weight) syntax used in the prompts above.
    pos_text_embeddings, neg_text_embeddings = lpw.get_weighted_text_embeddings(pipe, prompt=pos_prompt,
                                                                                uncond_prompt=neg_prompt)

    # NOTE(review): compel_proc is constructed but never used (all compel-based
    # embedding code below is commented out).
    compel_proc = Compel(tokenizer=pipe.tokenizer, text_encoder=pipe.text_encoder)

    # prompt_embeds = compel_proc.build_conditioning_tensor(pos_prompt)
    # negative_prompt_embeds = compel_proc.build_conditioning_tensor(neg_prompt)
    #prompt_embeds = compel_proc(pos_prompt)
    #negative_prompt_embeds = compel_proc(neg_prompt)

    # Per-token weight tables for the manual from_zero/A1111_renorm path; only
    # referenced by the commented-out experiments below.
    weights = [
        [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
         1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
         1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
         1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]]

    neg_weights = [
        [1.0, 2.0, 2.0, 1.0, 2.0, 2.0, 1.0, 2.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
         1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
         1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
         1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]]
    #prompt_embeds = from_zero(weights, prompt_embeds)
    #prompt_embeds_final = A1111_renorm(prompt_embeds, prompt_embeds)

    #negative_prompt_embeds = from_zero(neg_weights, negative_prompt_embeds)
    #negative_prompt_embeds_final = A1111_renorm(negative_prompt_embeds, negative_prompt_embeds)
    # Run the diffusion loop with the lpw-weighted embeddings (prompts are
    # passed as embeds, so the plain prompt/negative_prompt kwargs stay off).
    images_style = pipe(
        # prompt=pos_prompt,
        # negative_prompt=neg_prompt,
        height=height, width=width, guidance_scale=7,
        generator=generator,
        num_inference_steps=num_inference_steps, num_images_per_prompt=batch_size,
        # clip_skip=clip_skip,
        # prompt_embeds=prompt_embeds_final,
        # negative_prompt_embeds=negative_prompt_embeds_final
        prompt_embeds=pos_text_embeddings,
        negative_prompt_embeds=neg_text_embeddings
    ).images
    print('exec end')
    # Save each generated image to the working directory.
    for index in range(len(images_style)):
        image = images_style[index]
        output_path = f"image_of_{index}.png"
        image.save(output_path)
    print('run end')
