import torch
from modelscope import AutoTokenizer, AutoModelForCausalLM
from AIGC原理与实践.text_clip_model import TextModel
import random

# Select compute device; prefer the first CUDA GPU when available.
device = "cuda:0" if torch.cuda.is_available() else "cpu"

# Load the CLIP model used to score prompt/image similarity.
text_model = TextModel()
text_model.load_model()
# Prompt describing the desired image.
text = "A silver-plated locket necklace with a hinged closure."
# Negative prompt listing artifacts/defects to steer the sampler away from.
neg_txt = "Text, close ups, cropping, framing, worst quality, low quality, JPEG artifacts, PGLY, Repetition, pathology, deformity, excess fingers, mutated hands, poorly drawn hands, poorly drawn faces, mutation, deformity, blurriness, dehydration, poor anatomy, poor proportions, excess limbs, cloned faces, disfigurement, disgusting proportions, deformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck"

# Text-to-image pipeline loaded from a local checkpoint path.
# NOTE(review): AutoModelForCausalLM is an unusual loader for a Stable
# Diffusion 3.5 checkpoint — SD models are normally loaded through a
# diffusers pipeline (e.g. StableDiffusion3Pipeline). This only works if the
# repo's trust_remote_code hook returns a callable pipeline object whose
# result exposes `.images` — confirm before relying on it.
pipeline_t2i_stable = AutoModelForCausalLM.from_pretrained("I:\\models\\AI-ModelScope\\stable-diffusion-3___5-medium",trust_remote_code=True)
pipeline_t2i_stable.to(device)

def generator(seed, prompt=None, negative_prompt=None):
    """Generate one 1024x1024 image with the module-level t2i pipeline.

    Args:
        seed: Integer seed used to make diffusion sampling reproducible.
        prompt: Optional prompt override; defaults to the module-level
            ``text`` when ``None`` (backward compatible with the original
            single-argument call).
        negative_prompt: Optional negative-prompt override; defaults to the
            module-level ``neg_txt`` when ``None``.

    Returns:
        The first image (``.images[0]``) produced by the pipeline call.
    """
    # Use a distinct local name for the RNG: the original bound a local
    # variable ``generator`` inside the function ``generator``, shadowing
    # the function's own name.
    rng = torch.Generator(device=device)
    rng.manual_seed(int(seed))
    image = pipeline_t2i_stable(
        prompt=text if prompt is None else prompt,
        negative_prompt=neg_txt if negative_prompt is None else negative_prompt,
        width=1024, height=1024,
        num_inference_steps=50,
        generator=rng,
        guidance_scale=7.5,
    ).images[0]
    return image

# Random search: repeatedly sample a seed, render an image, and keep on disk
# whichever image scores the highest CLIP similarity to the prompt so far.
num_steps = 10000
learning_rate = 0.01  # not referenced below; retained for compatibility
maxS0=0
for step in range(num_steps):
    # Draw a fresh seed and render a candidate image for it.
    seed = random.randint(100000, 10000000)

    generated_image = generator(seed)

    # Score the candidate against the prompt with CLIP; skip non-improvements.
    S0 = text_model.compute_clip_similarity_prompt_image(text, generated_image)
    if S0 <= maxS0:
        continue

    # New best score: record it and persist the image.
    maxS0 = S0
    prev_img_path = f"generated_image_{seed}_{S0}.png"
    print(f"{seed} S0 {S0} {prev_img_path}")
    generated_image.save(prev_img_path)