import os

# Route all Hugging Face Hub downloads through the mirror.  This must be set
# BEFORE transformers/diffusers are imported — presumably the hub client reads
# HF_ENDPOINT at import time (TODO confirm for the pinned library versions).
os.environ["HF_ENDPOINT"] = "https://hf-mirror.com"

# NOTE(review): nothing below appears to use the BERT tokenizer names this
# wildcard brings in, nor AutoTokenizer/CLIPTextModel — kept to avoid changing
# the module namespace; confirm before removing.
from transformers.models.bert.tokenization_bert import *
import torch
from diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline, DDIMScheduler
from diffusers.utils import load_image
from transformers import AutoTokenizer, CLIPTextModel

# os.environ["http_proxy"] = "http://192.168.3.116:7890/"
# os.environ["https_proxy"] = "http://192.168.3.116:7890/"

# revision="fp16"
# sd2_id = "FFusion/di.FFUSION.ai-v2.1-768-BaSE-alpha"
# # sd2_id = "FFusion/FFusion-BaSE"
#
# pipe = StableDiffusionPipeline.from_pretrained(sd2_id, revision=revision, torch_dtype=torch.float16)
#
# pipe.to("cuda")
#
# image = pipe("Cyberpunk style city streets, 8K resolution, CG rendering", guidance_scale=10, num_inference_steps=20).images[0]
# image.save("cyberpunk.jpeg")


# --- img2img generation --------------------------------------------------
# Source image whose structure is re-rendered in the prompted style.
# image_path = "data/12d/00000.png"   # alternative input
# image_path = "data/image/3.png"     # alternative input
image_path = "data/13l/00000.png"
init_image = load_image(image_path)

# revision=None pulls the default (main) branch of the model repo;
# set revision="fp16" to fetch the half-precision weights branch instead.
revision = None
sd2_id = "FFusion/di.FFUSION.ai-v2.1-768-BaSE-alpha"
# sd2_id = "FFusion/FFusion-BaSE"

# Weights are cast to float16 regardless of branch, so a CUDA device is
# required (fp16 inference on CPU is unsupported/extremely slow).
pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
    sd2_id, revision=revision, torch_dtype=torch.float16
)
pipe.to("cuda")

# strength=0.5 preserves roughly half of the source image's structure;
# guidance_scale=10 pushes the result strongly toward the text prompt.
image = pipe(
    "A small Shiba Inu, 2d anime style, 8K resolution, CG rendering",
    guidance_scale=10,
    image=init_image,
    strength=0.5,
).images[0]
image.save("FFUSION.jpeg")