#@title
import os

import torch
import gradio as gr

from PIL import Image
from diffusers import StableDiffusionImg2ImgPipeline  # changed from StableDiffusionPipeline so an init image can be supplied
from transformers import CLIPTextModel, CLIPTokenizer

MY_SECRET_TOKEN = os.environ.get("HF_TOKEN_SD")
YOUR_TOKEN = MY_SECRET_TOKEN
device = "cpu"

pretrained_model_name_or_path = "CompVis/stable-diffusion-v1-4" #@param {type:"string"}

from huggingface_hub import hf_hub_download
#@title Load your concept here
#@markdown Enter the `repo_id` for a concept you like (you can find pre-learned concepts in the public [SD Concepts Library](https://huggingface.co/sd-concepts-library))
repo_id_embeds = "sd-concepts-library/mikako-methodi2i" #@param {type:"string"}
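
#@markdown Download the embedding file from the concept repo. SD Concepts Library
#@markdown repos store it as `learned_embeds.bin` (assumed filename convention).
learned_embeds_path = hf_hub_download(repo_id=repo_id_embeds, filename="learned_embeds.bin")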

def image_grid(imgs, rows, cols):
    assert len(imgs) == rows*cols

    w, h = imgs[0].size
    grid = Image.new('RGB', size=(cols*w, rows*h))
    grid_w, grid_h = grid.size
    
    for i, img in enumerate(imgs):
        grid.paste(img, box=(i%cols*w, i//cols*h))
    return grid
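
# image_grid isn't called in this demo, but it is handy for eyeballing batches,
# e.g. (hypothetical): image_grid(images, rows=2, cols=2).save("grid.png")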

#@title Set up the Tokenizer and the Text Encoder
tokenizer = CLIPTokenizer.from_pretrained(
    pretrained_model_name_or_path,
    subfolder="tokenizer",
    use_auth_token=YOUR_TOKEN,
)
text_encoder = CLIPTextModel.from_pretrained(
    pretrained_model_name_or_path, subfolder="text_encoder", use_auth_token=YOUR_TOKEN
)
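
# The tokenizer and text encoder are loaded on their own (rather than inside the
# pipeline) so the learned concept token can be injected into them first.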

#@title Load the newly learned embeddings into CLIP
def load_learned_embed_in_clip(learned_embeds_path, text_encoder, tokenizer, token=None):
  loaded_learned_embeds = torch.load(learned_embeds_path, map_location="cpu")
  
  # separate token and the embeds
  trained_token = list(loaded_learned_embeds.keys())[0]
  embeds = loaded_learned_embeds[trained_token]

  # cast to dtype of text_encoder (.to() returns a new tensor; assign it back)
  dtype = text_encoder.get_input_embeddings().weight.dtype
  embeds = embeds.to(dtype)

  # add the token in tokenizer
  token = token if token is not None else trained_token
  num_added_tokens = tokenizer.add_tokens(token)
  if num_added_tokens == 0:
    raise ValueError(f"The tokenizer already contains the token {token}. Please pass a different `token` that is not already in the tokenizer.")
  
  # resize the token embeddings
  text_encoder.resize_token_embeddings(len(tokenizer))
  
  # get the id for the token and assign the embeds
  token_id = tokenizer.convert_tokens_to_ids(token)
  text_encoder.get_input_embeddings().weight.data[token_id] = embeds
load_learned_embed_in_clip(learned_embeds_path, text_encoder, tokenizer)

def crop_center(pil_img, crop_width, crop_height):
    img_width, img_height = pil_img.size
    return pil_img.crop(((img_width - crop_width) // 2,
                         (img_height - crop_height) // 2,
                         (img_width + crop_width) // 2,
                         (img_height + crop_height) // 2))
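
# Example (hypothetical sizes): keep the middle square of a 640x480 frame.
# square = crop_center(Image.open("a_img.png"), 480, 480)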
                         
#@title Run the Stable Diffusion pipeline
#@markdown Don't forget to use the placeholder token in your prompt

from torch import autocast

# float16 weights are only safe on GPU; fall back to float32 on CPU.
pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
    pretrained_model_name_or_path,
    torch_dtype=torch.float16 if device == "cuda" else torch.float32,
    text_encoder=text_encoder,
    tokenizer=tokenizer,
    use_auth_token=YOUR_TOKEN,
).to(device)
pipe.enable_attention_slicing()
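
# With a CUDA GPU you could set device = "cuda" above and keep float16;
# enable_attention_slicing() then trades a little speed for lower VRAM use.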



def inmm(init_image, prompt):
    # Center-crop to a square (handles both landscape and portrait inputs),
    # then resize to the 512x512 the model expects; PIL's resize takes a tuple.
    w, h = init_image.size
    side = min(w, h)
    init_image = crop_center(init_image, side, side).resize((512, 512))

    # autocast only helps on CUDA; run in full precision on CPU.
    with autocast(device, enabled=(device == "cuda")):
        image = pipe([prompt], num_inference_steps=50, guidance_scale=7, init_image=init_image)["sample"]
    return image[0]
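
# Quick smoke test before launching the UI (uses one of the example images below):
# inmm(Image.open("a_img.png"), "a calm lake in <m-mi2i> style").save("preview.png")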

demo = gr.Interface(
    inmm,
    inputs=[
        gr.Image(shape=(512, 512), type="pil"),
        gr.Textbox(lines=2, placeholder="Describe the picture you want",
                   value="a heartwarming and calming landscape drawing in <m-mi2i> style"),
    ],
    outputs="image",
    examples=[
        ["a_img.png", "A heartwarming Canadian wheat field scene in <m-mi2i> style, some houses, silos, and a lake in the distance"],
        ["c_img.png", "A heartwarming Landscape on the lake, scenery mirrored on the lake, in <m-mi2i> style"],
    ],
)
demo.launch()