Update main.py
main.py
CHANGED
@@ -51,45 +51,7 @@ def generate_image_caption(image_path):
     out = sd_pipe(img_tensor, guidance_scale=3)
     out["images"][0].save("img1.jpg")
 
-    # Blip image captioning
-    raw_image = Image.open(image_path).convert("RGB")
 
-    processor = BlipProcessor.from_pretrained(
-        "Salesforce/blip-image-captioning-large"
-    )
-    model = BlipForConditionalGeneration.from_pretrained(
-        "Salesforce/blip-image-captioning-large"
-    ).to(device)
-
-    # Conditional image captioning
-    text = "a photography of"
-    inputs = processor(raw_image, text, return_tensors="pt").to(device)
-    out = model.generate(**inputs)
-    caption = processor.decode(out[0], skip_special_tokens=True)
-
-    # Unconditional image captioning
-    inputs = processor(raw_image, return_tensors="pt").to(device)
-    out = model.generate(**inputs)
-    caption = processor.decode(out[0], skip_special_tokens=True)
-
-    # Stable diffusion pipeline
-    model_id = "prompthero/openjourney"
-    pipe = StableDiffusionPipeline.from_pretrained(
-        model_id, torch_dtype=torch.float32
-    )
-    pipe = pipe.to(device)
-
-    Room = "Living Room"
-    AI_Intervention = "High"
-    Mode = "Redesign"
-    Design = "Modern"
-    prompt = (
-        f"Give me a realistic and complete image of {caption} "
-        f"which room type: {Room}, AI Intervention: {AI_Intervention}, "
-        f"Mode: {Mode} and Design style: {Design}"
-    )
-    image = pipe(prompt).images[0]
-    image.save("result3.jpg")
 
 
 
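For reference, a minimal standalone sketch of the captioning-plus-generation flow that this commit removes. Model IDs and the prompt come from the diff; the input path "room.jpg", the device selection, and the flattened prompt variables are assumptions added here so the snippet runs on its own:

import torch
from PIL import Image
from transformers import BlipProcessor, BlipForConditionalGeneration
from diffusers import StableDiffusionPipeline

device = "cuda" if torch.cuda.is_available() else "cpu"

# BLIP image captioning, conditioned on a text prefix (as in the removed code).
processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
model = BlipForConditionalGeneration.from_pretrained(
    "Salesforce/blip-image-captioning-large"
).to(device)

raw_image = Image.open("room.jpg").convert("RGB")  # hypothetical input path
inputs = processor(raw_image, "a photography of", return_tensors="pt").to(device)
out = model.generate(**inputs)
caption = processor.decode(out[0], skip_special_tokens=True)

# Stable Diffusion text-to-image, folding the caption into the prompt.
pipe = StableDiffusionPipeline.from_pretrained(
    "prompthero/openjourney", torch_dtype=torch.float32
).to(device)
prompt = (
    f"Give me a realistic and complete image of {caption} "
    "which room type: Living Room, AI Intervention: High, "
    "Mode: Redesign and Design style: Modern"
)
image = pipe(prompt).images[0]
image.save("result3.jpg")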