import io
import random

import numpy as np
import requests
import gradio as gr
from PIL import Image, ImageDraw, ImageFont
from rembg import remove
from sklearn.cluster import KMeans
from huggingface_hub import InferenceClient
def generate(logo=None, Vtubername="", sdkey=""):
    if logo is None:
        raise gr.Error('Please Select Your Photo📸')
    if Vtubername == "":
        Vtubername = "unknown"
        gr.Warning('Please Enter Your Name😱')
    if sdkey == "":
        raise gr.Error('Please Set a valid Stability AI API Key🔑')
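    # Pull the three most dominant colors from the uploaded photo with K-means,
    # ignoring the flat background color detected along the image edges.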
    def extract_dominant_colors(img, num_colors=3, ignore_edges=True):
        # Work on a small RGB copy to keep clustering fast.
        image = img.convert('RGB').resize((150, 150))
        data = np.array(image)
        pixels = data.reshape(-1, 3)
        if ignore_edges:
            # Treat the most common color along the border as background
            # and drop it before clustering.
            edge_pixels = np.concatenate([data[0, :, :], data[-1, :, :], data[:, 0, :], data[:, -1, :]], axis=0)
            edge_colors, counts = np.unique(edge_pixels, axis=0, return_counts=True)
            background_color = edge_colors[counts.argmax()]
            pixels = pixels[~np.all(pixels == background_color, axis=1)]
            # Fall back to sensible defaults when almost everything was background.
            if len(pixels) == 0:
                return np.array([background_color, np.array([60, 60, 60]), np.array([255, 255, 255])])
            elif len(pixels) == 1:
                return np.array([pixels[0], np.array([60, 60, 60]), np.array([255, 255, 255])])
            elif len(pixels) == 2:
                return np.array([pixels[0], pixels[1], np.array([60, 60, 60])])
        model = KMeans(n_clusters=num_colors, n_init=10)
        model.fit(pixels)
        colors = model.cluster_centers_.round(0).astype(int)
        return colors
dominant_colors = extract_dominant_colors(logo, num_colors=3)
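    # Named reference colors: the primary dominant color is mapped to the nearest
    # of these so it can be described in plain words inside the SD3 text prompt.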
template_prime_colors = {
"black color": [0, 0, 0],
"white": [255, 255, 255],
"red": [255, 0, 0],
"lightgreen": [0, 255, 0],
"blue": [0, 0, 255],
"yellow": [255, 255, 0],
"lightblue": [0, 255, 255],
"pink": [255, 0, 255],
"gray": [128, 128, 128],
"maroon": [128, 0, 0],
"olive": [128, 128, 0],
"green": [0, 128, 0],
"purple": [128, 0, 128],
"navy": [0, 0, 128],
"orange": [255, 165, 0],
"bluegreen": [0, 128, 128],
"lightpurple": [128, 128, 255],
"skyblue color": [0, 128, 255],
"brown": [139,69,19],
}
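    # Pick the template color name closest (Euclidean distance in RGB) to the
    # primary dominant color; the other two dominant colors become rgb() strings
    # used for the hiragana caption's fill and stroke.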
    _primary_color = dominant_colors[0]
    primary_color = min(
        template_prime_colors,
        key=lambda name: np.linalg.norm(np.array(template_prime_colors[name]) - _primary_color),
    )
    print(primary_color)
    secondary_color = "rgb({}, {}, {})".format(*dominant_colors[1])
    third_color = "rgb({}, {}, {})".format(*dominant_colors[2])
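    # Ask a hosted Mixtral instruct model to transliterate the Latin name into
    # hiragana, then keep only the hiragana characters from its reply.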
    client = InferenceClient(model="mistralai/Mixtral-8x7B-Instruct-v0.1")
    output = client.text_generation(
        "Convert this English name to Japanese hiragana. ex. Robert->ろばーと HuggingFace->はぎんぐふぇいす " + Vtubername + "->"
    )
    hiragana = "".join(char for char in output if '\u3040' <= char <= '\u309f')
    if hiragana == "":
        # Fall back to the raw name so the caption image is never zero-width.
        hiragana = Vtubername
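    # Generate the main title logo with Stability AI's SD3 endpoint, describing
    # the requested text and the primary color name in the prompt.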
    response = requests.post(
        "https://api.stability.ai/v2beta/stable-image/generate/sd3",
        headers={
            "authorization": f"Bearer {sdkey}",
            "accept": "image/*"
        },
        files={"none": ''},
        data={
            "model": "sd3",
            "prompt": f'pop sweety cute kawaii font anime title logo drawn by adobe illustrator. Logo for kids anime. The title logo text is "{Vtubername}", The logo text color:{primary_color}. Single Logo only.',
            "negative_prompt": "subtitle, face, ruby text, smoke, subscript, superscript, multiple titles, character, ugly, blurry, dirty, character face, watermark, low res, cropped, worst quality, jpeg artifacts, picture frame, out of frame, animal, person face, blur, out of focus, disgusting",
            "output_format": "jpeg",
        },
    )
    if response.status_code != 200:
        gr.Warning('Image generation failed. Check your API key and prompt.')
        raise Exception(str(response.json()))
    image = Image.open(io.BytesIO(response.content))
    # Strip the flat background from the generated logo so it can be composited.
    title_logo = remove(image)
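    # Perceived-brightness helper (Rec. 601 luma weights): the brighter of the two
    # secondary colors fills the hiragana caption, the darker one outlines it.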
    def get_brightness(color):
        red, green, blue = color
        return (red * 0.299 + green * 0.587 + blue * 0.114) / 255
    brighter_color = secondary_color if get_brightness(dominant_colors[1]) > get_brightness(dominant_colors[2]) else third_color
    darker_color = secondary_color if get_brightness(dominant_colors[1]) < get_brightness(dominant_colors[2]) else third_color
    font_color = brighter_color
    font_size = 100
    stroke_width = int(font_size * 0.1)
    stroke_color = darker_color
    # Load the font used for the hiragana caption
    font = ImageFont.truetype("oshigo.otf", size=font_size)
japanese_text = hiragana
# Image setup
tile_width, tile_height = int(font_size*1.4), int(font_size*1.4) # Size of individual tiles
num_tiles = len(japanese_text)
total_width = tile_width * num_tiles
total_height = tile_height
# Create a new blank image
result_image = Image.new('RGBA', (total_width, total_height), (0, 0, 0, 0))
draw = ImageDraw.Draw(result_image)
    for i, char in enumerate(japanese_text):
        # Create an image for each character with transparency
        tile_image = Image.new('RGBA', (tile_width, tile_height), (0, 0, 0, 0))
        tile_draw = ImageDraw.Draw(tile_image)
        # Measure the glyph (textbbox replaces the textsize method removed in Pillow 10)
        left, top, right, bottom = draw.textbbox((0, 0), char, font=font)
        text_width, text_height = right - left, bottom - top
        # Jitter the glyph position randomly within its tile
        x = random.randint(0, max(0, int((tile_width - text_width) / 1.25)))
        y = random.randint(0, max(0, int((tile_height - text_height) / 1.25)))
        # Draw the character with the brighter dominant color and a darker outline
        tile_draw.text((x, y), char, font=font, fill=font_color, stroke_width=stroke_width, stroke_fill=stroke_color)
        # Paste the tile into the result image
        result_image.paste(tile_image, (i * tile_width, 0), tile_image)
    # The assembled hiragana strip becomes the caption overlay
    caption = result_image
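    # If the caption is wider than the uploaded photo, rescale it to 2.4x the
    # photo's width while keeping its aspect ratio.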
def resize_caption_to_logo(logo, caption):
if caption.width > logo.width:
scaler = 2.4
resized_caption = caption.resize((int(logo.width*scaler), int(scaler*caption.height * logo.width / caption.width )))
print("resizing")
return resized_caption
else:
return caption
caption = resize_caption_to_logo(logo, caption)
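    # Candidate placements for the caption on the title logo; one is picked at random.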
    center = ((title_logo.width - caption.width) // 2, title_logo.height // 2)
    bottom = ((title_logo.width - caption.width) // 2, title_logo.height - caption.height - 100)
    lower_right = (title_logo.width - caption.width - 40, title_logo.height - caption.height - 80)
    upper_right = (title_logo.width - caption.width - 40, caption.height + 80)
# Define the possible positions
positions = [
("center", center),
("bottom", bottom),
("lower_right", lower_right),
("upper_right", upper_right),
]
# Randomly select a position
position, coordinates = random.choice(positions)
# Paste the caption at the selected position
title_logo.paste(caption, coordinates, caption)
return title_logo
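# Pastel polka-dot background (radial gradients) and a pink, outlined gradient
# page title for the Gradio app.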
css="""
.gradio-container{
background-color: #fff;
background-image:
radial-gradient(#b4f3ea 0%, transparent 30%),
radial-gradient(#ffffcc 0%, transparent 30%); background-size: 40px 40px;
background-position: 0 0, 20px 20px;
}
h1{
font-size: 400%!important;
background: linear-gradient(to bottom, pink, white);
-webkit-background-clip: text;
-webkit-text-fill-color: transparent;
    -webkit-text-stroke: 2px pink;
}
"""
import os
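# Wire the generator into a simple Gradio interface: photo, name, and API key in,
# finished logo image out.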
iface = gr.Interface(
theme=gr.themes.Default(primary_hue="pink",font=[gr.themes.GoogleFont("Mochiy Pop One")]),
css=css,
fn=generate,
inputs=[gr.Image(label="Your Photo", type="pil"), gr.Textbox(label="Your Name(*alphabet only!*)"), gr.Textbox(label="Stability AI API Key")],
outputs=gr.Image(label="Generated Logo"),
title="Kawaii Logo Generator",
description="①Upload photo you wanna make Kawaii❤️
② Input the name(*alphabet only!*)⭐️
③ Set your Stability AI API key🔑(https://platform.stability.ai/account/keys)
④Press Submit🧙",
#examples=[["image.jpeg", "gojiteji", os.environ["sdkey"]]],
allow_flagging=False
)
# Launch the interface
iface.launch(debug=True)