# Hugging Face Spaces app (page-status header "Spaces: Running" removed —
# it was scrape residue, not code).
# Standard library
import os
from io import BytesIO

# Third-party
import gradio as gr
import replicate
import requests
from openai import OpenAI
from PIL import Image

# Credentials come from the environment. NOTE(review): these module-level
# names are not referenced below — the OpenAI client and the replicate
# library each read their own env var directly; kept for documentation.
OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
REPLICATE_API_TOKEN = os.getenv('REPLICATE_API_TOKEN')
def generate_image_openai(prompt):
    """Generate one 1024x1024 DALL-E 3 image for *prompt*.

    Returns:
        tuple: ``(url, revised_prompt)`` — the hosted image URL and the
        prompt as rewritten by the API before generation.
    """
    # The client picks up OPENAI_API_KEY from the environment.
    client = OpenAI()
    result = client.images.generate(
        model="dall-e-3",
        prompt=prompt,
        size="1024x1024",
        n=1,
    )
    generated = result.data[0]
    return generated.url, generated.revised_prompt
def style_transfer(input_image_path, style_image_path, prompt_det):
    """Restyle *input_image_path* using a reference style image via Replicate.

    Args:
        input_image_path: URL (or path) of the source image, passed to the
            model as-is.
        style_image_path: local filesystem path of the style reference image.
        prompt_det: extra descriptive keywords appended to the base prompt.

    Returns:
        URL of the first output image produced by the deployment.
    """
    # Fix: close the style-image file handle deterministically (the original
    # opened it and never closed it). Kept open until the prediction finishes
    # in case the client streams the upload lazily.
    with open(style_image_path, "rb") as style_file:
        model_input = {
            "image": input_image_path,
            "image_style": style_file,
            "style_strength": 0.4,
            "structure_strength": 1.2,
            "negative_prompt": "hands, fingers, feet, legs, shoes",
            "prompt": " natural light, natural bright colors, low quality, candid, grainy, instagram photo, phone camera, high iso noisy " + prompt_det,
            "seed": 42,  # fixed seed for reproducible output
            "guidance_scale": 5
        }
        # Dedicated deployment instead of the public model endpoint.
        deployment = replicate.deployments.get("2clicksmedia/my-app-photorealism")
        prediction = deployment.predictions.create(input=model_input)
        prediction.wait()  # block until the prediction completes
    return prediction.output[0]
def upscale_image(image_path, prompt_det):
    """Upscale *image_path* 3x with the clarity-upscaler model on Replicate.

    Args:
        image_path: URL (or path) of the image to upscale, passed as-is.
        prompt_det: extra descriptive keywords appended to the base prompt.

    Returns:
        URL of the first upscaled output image.
    """
    # Renamed from `input` to avoid shadowing the builtin.
    model_input = {
        "image": image_path,
        "prompt": "candid photo, high iso, phone camera, grainy <lora:more_details:0.5> , symmetric hands " + prompt_det,
        "scale_factor": 3,
        "negative_prompt": "hands, fingers, feet, legs, shoes",
    }
    output = replicate.run(
        "philz1337x/clarity-upscaler:eba39f520856d5e61a8ad56fd57f97be2fa30de65e29d8e94db5209a1827cd59",
        input=model_input,
    )
    return output[0]
def get_keyword_prompt(image_url):
    """Ask GPT-4o to describe the image at *image_url* as comma-separated keywords.

    Returns:
        str: a single-paragraph, comma-separated description of the image.
    """
    client = OpenAI()
    instruction = {
        "type": "text",
        "text": "Describe this image in detail, using phrases or keywords separated by commas. Include details about the person such as gender, race, and appearance excluding details about hair color, footwear. Indicate the position left or right. Keep it short and provide the information in one paragraph, separated by commas.",
    }
    image_part = {"type": "image_url", "image_url": {"url": image_url}}
    completion = client.chat.completions.create(
        model="gpt-4o",
        messages=[{"role": "user", "content": [instruction, image_part]}],
    )
    return completion.choices[0].message.content
def infer(text, title):
    """Run the full pipeline for one article snippet.

    Generates a DALL-E 3 illustration for *text*/*title*, derives a keyword
    description of it, applies the local style reference, upscales the
    result, and downloads both images.

    Returns:
        tuple: ``(dalle_img, img)`` — the raw DALL-E image and the final
        styled/upscaled image, both as PIL images.
    """
    prompt = f"""Please create a simple suitable image to accompany the following text as part of an article with the title "{title}". The objects in the image must have realistic proportions, always keep people/person in focus, and keep the colors warm, try to keep it simple with few objects/concepts. Text: "{text}"
Please make sure not to include text in the image."""
    image_url_openai, revised_prompt = generate_image_openai(prompt)
    prompt_det = get_keyword_prompt(image_url_openai)
    style_image_url = style_transfer(image_url_openai, './Style.png', prompt_det)
    upscaled_image_url = upscale_image(style_image_url, prompt_det)

    # Fix: fail fast on a bad HTTP status instead of letting PIL choke on
    # an error page's bytes with a confusing "cannot identify image" error.
    response_dalle = requests.get(image_url_openai)
    response_dalle.raise_for_status()
    dalle_img = Image.open(BytesIO(response_dalle.content))

    response = requests.get(upscaled_image_url)
    response.raise_for_status()
    img = Image.open(BytesIO(response.content))
    return dalle_img, img
# Example prompts for the UI gallery; none are currently enabled.
examples = []
# css=""" | |
# #col-container { | |
# margin: 0 auto; | |
# max-width: 520px; | |
# } | |
# """ | |
# Gradio UI: two text inputs (snippet + article title), a Run button, and
# two image outputs (raw DALL-E result and the styled/upscaled result).
with gr.Blocks() as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown("""
    # Text-to-Image and style transfer.
    """)
        with gr.Row():
            text = gr.Text(
                label="Text",
                show_label=False,
                max_lines=1,
                placeholder="Enter the `text`",
                container=False,
            )
            title = gr.Text(
                label="Title",
                show_label=False,
                max_lines=1,
                placeholder="Enter the `title`",
                container=False,
            )
            run_button = gr.Button("Run", scale=0)
        dalle = gr.Image(label="dalle", show_label=False)
        result = gr.Image(label="Result", show_label=False)
        # Wire the button to the full generation pipeline.
        run_button.click(
            fn=infer,
            inputs=[text, title],
            outputs=[dalle, result],
        )
demo.queue().launch()