from diffusers import StableDiffusionPipeline, AutoPipelineForText2Image, EulerAncestralDiscreteScheduler
from modelscope.pipelines import pipeline
import torch
from omegaconf import OmegaConf
from transformers import (CLIPTextConfig, CLIPTextModel, CLIPTokenizer)

if __name__ == "__main__":
    # Text-to-image generation script: load a local Stable Diffusion 1.x
    # checkpoint (single .safetensors file), switch to the Euler Ancestral
    # sampler, and render a fixed-seed batch of images to PNG files.
    base_model_path = 'E:/data/ai/sd-model/majicmixRealistic_v7.safetensors'
    print(base_model_path)

    # NOTE(fix): the original call passed `from_safetensor=True` (a typo —
    # the real kwarg is `use_safetensors`, already given), `device='cuda'`
    # and `local_files_only=None` (neither is a valid `from_single_file`
    # parameter), and the deprecated `load_safety_checker=False` (superseded
    # by `safety_checker=None`, already given). Depending on the diffusers
    # version these were silently ignored or raised TypeError; drop them.
    pipe = StableDiffusionPipeline.from_single_file(
        base_model_path,
        safety_checker=None,   # disable the NSFW checker entirely
        use_safetensors=True,
    )
    print('load model end')

    # Move the whole pipeline (UNet, VAE, text encoder) to the GPU.
    pipe = pipe.to("cuda")
    print('send end')

    pos_prompt = '1male, black classic suit, ties, bowties, bar background, upper_body, raw photo, masterpiece, solo, medium shot, high detail face, photorealistic, best quality'
    neg_prompt = ('(nsfw:2), paintings, sketches, (worst quality:2), (low quality:2), lowers, normal quality, '
                  '((monochrome)), ((grayscale)), logo, word, character, bad hand, tattoo, (username, watermark, '
                  'signature, time signature, timestamp, artist name, copyright name, copyright),low res, '
                  '((monochrome)), ((grayscale)), skin spots, acnes, skin blemishes, age spot, glans, extra fingers, '
                  'fewer fingers, strange fingers, bad hand, mole, ((extra legs)), ((extra hands)), children')

    # Generation parameters.
    height = 512
    width = 512
    batch_size = 1              # images per prompt
    num_inference_steps = 30

    # Replace the checkpoint's default scheduler with Euler Ancestral,
    # reusing the existing scheduler config so timesteps stay compatible.
    pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)

    # Fixed seed on the CUDA device for reproducible output.
    generator = torch.Generator(device="cuda").manual_seed(3159416547)
    images_style = pipe(prompt=pos_prompt, height=height, width=width, guidance_scale=7, negative_prompt=neg_prompt,
                        generator=generator,
                        num_inference_steps=num_inference_steps, num_images_per_prompt=batch_size).images
    print('exec end')

    # Save each generated PIL image to the working directory.
    for index, image in enumerate(images_style):
        output_path = f"image_of_{index}.png"
        image.save(output_path)
    print('run end')
