from fastapi import FastAPI
from fastapi.staticfiles import StaticFiles
from fastapi.responses import FileResponse
import gradio as gr
import os
import sys
import random
import string
import time
from queue import Queue
from threading import Thread
import requests
import io
from PIL import Image
import base64
from deep_translator import GoogleTranslator

app = FastAPI()

API_URL = "https://api-inference.huggingface.co/models/playgroundai/playground-v2-1024px-aesthetic"
API_TOKEN = os.getenv("HF_READ_TOKEN")
headers = {"Authorization": f"Bearer {API_TOKEN}"}
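
# Note: HF_READ_TOKEN must be set in the environment (for example as a Space
# secret); without it the Inference API requests below will fail to authorize.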

text_gen = gr.Interface.load("models/Gustavosta/MagicPrompt-Stable-Diffusion")
proc1 = gr.Interface.load("models/playgroundai/playground-v2-1024px-aesthetic")

queue = Queue()
queue_threshold = 100


def add_random_noise(prompt, noise_level=0.00):
    # Replace a small percentage of characters with random ones so that repeated
    # requests with the same text still produce distinct prompts.
    percentage_noise = noise_level * 5
    num_noise_chars = int(len(prompt) * (percentage_noise / 100))
    noise_indices = random.sample(range(len(prompt)), num_noise_chars)
    prompt_list = list(prompt)
    noise_chars = list(string.ascii_letters + string.punctuation + ' ' + string.digits)
    # Emoji noise characters.
    noise_chars.extend(['💩', '😭', '😷', '🤯', '🥴', '😴', '🤩', '🥳', '🤪', '🤢',
                        '👹', '👻', '👽', '❤️', '🐶', '🐱', '🐻', '🐨', '🐯', '🌴',
                        '🌺', '🌻', '🌸', '🎨', '🌧️', '🌪️', '🌈'])
    for index in noise_indices:
        prompt_list[index] = random.choice(noise_chars)
    return "".join(prompt_list)
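
# With the default noise_level of 0.0, num_noise_chars is 0 and the prompt is
# returned unchanged; the callers below then append a timestamp suffix themselves.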


import uuid

request_counter = 0


def send_it1(inputs, noise_level, proc=proc1):
    global request_counter
    request_counter += 1
    # A timestamp/counter suffix keeps otherwise identical prompts distinct.
    timestamp = f"{time.time()}_{request_counter}"
    prompt_with_noise = add_random_noise(inputs, noise_level) + f" - {timestamp}"
    # Simple throttle: wait while too many prompts are already in flight.
    while queue.qsize() >= queue_threshold:
        time.sleep(2)
    queue.put(prompt_with_noise)
    output = proc(prompt_with_noise)
    # Release the queue slot once the request has been processed; otherwise the
    # queue only grows and eventually blocks every new request.
    queue.get()
    return output


def generate_image(inputs, is_negative, steps, cfg_scale, seed):
    global request_counter
    try:
        request_counter += 1
        timestamp = f"{time.time()}_{request_counter}"

        # Translate the prompt to English before sending it to the model.
        translator_to_en = GoogleTranslator(source='auto', target='english')
        english_inputs = translator_to_en.translate(inputs)

        prompt_with_noise = add_random_noise(english_inputs) + f" - {timestamp}"
        payload = {
            "inputs": prompt_with_noise,
            "is_negative": is_negative,
            "steps": steps,
            "cfg_scale": cfg_scale,
            "seed": seed if seed is not None else random.randint(-1, 2147483647)
        }
        response = requests.post(API_URL, headers=headers, json=payload)
        response.raise_for_status()
        image_bytes = response.content
        image = Image.open(io.BytesIO(image_bytes))
        return image
    except requests.exceptions.HTTPError as e:
        print(f"HTTP Error: {e}")
        return None  # callers check for None to detect failure
    except Exception as e:
        print(f"Error generating image: {e}")
        return None


def get_prompts(prompt_text):
    global request_counter
    if not prompt_text:
        return "Please enter text before generating prompts."
    request_counter += 1
    timestamp = f"{time.time()}_{request_counter}"

    # Style snippets appended to the user's text before MagicPrompt expands it.
    options = [
        "Cyberpunk android",
        "2060",
        "newyork",
        "style of laurie greasley", "studio ghibli", "akira toriyama", "james gilleard", "genshin impact", "trending pixiv fanbox", "acrylic palette knife, 4k, vibrant colors, devinart, trending on artstation, low details",
        "Editorial Photography, Shot on 70mm lens, Depth of Field, Bokeh, DOF, Tilt Blur, Shutter Speed 1/1000, F/22, 32k, Super-Resolution, award winning,",
        "high detail, warm lighting, godrays, vivid, beautiful, trending on artstation, by jordan grimmer, huge scene, grass, art greg rutkowski",
        "highly detailed, digital painting, artstation, illustration, art by artgerm and greg rutkowski and alphonse mucha.",
        "Charlie Bowater, stanley artgerm lau, a character portrait, sots art, sharp focus, smooth, aesthetic, extremely detailed, octane render, solo, dark industrial background, rtx, rock clothes, cinematic light, intricate detail, highly detailed, high res, detailed facial features",
        "portrait photograph", "realistic", "concept art", "elegant, highly detailed", "intricate, sharp focus, depth of field, f/1.8, 85mm, medium shot, mid shot, (((professionally color graded)))", "sharp focus, bright soft diffused light", "(volumetric fog),",
        "Cinematic film still", "(dark city street:1.2)", "(cold colors), damp, moist, intricate details", "shallow depth of field, [volumetric fog]", "cinematic lighting, reflections, photographed on a Canon EOS R5, 50mm lens, F/2.8, HDR, 8k resolution", "cinematic film still from cyberpunk movie", "volumetric fog, (RAW, analog, masterpiece, best quality, soft particles, 8k, flawless perfect face, intricate details", "trending on artstation, trending on cgsociety, dlsr, ultra sharp, hdr, rtx, antialiasing, canon 5d foto))", "((skin details, high detailed skin texture))", "(((perfect face))), (perfect eyes)))",
    ]

    chosen_option = random.choice(options)
    return text_gen(f"{prompt_text}, {chosen_option} - {timestamp}")


def initialize_api_connection():
    global headers
    API_TOKEN = os.getenv("HF_READ_TOKEN")
    headers = {"Authorization": f"Bearer {API_TOKEN}"}


initialize_api_connection()


@app.get("/generate_prompts")
def generate_prompts(prompt_text: str):
    return get_prompts(prompt_text)
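
# Example (illustrative): GET /generate_prompts?prompt_text=castle%20in%20the%20clouds
# passes the text, combined with one randomly chosen style snippet from the options
# list above, to MagicPrompt and returns the expanded prompt.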


from fastapi import Query
from fastapi import HTTPException
from typing import Optional


@app.get("/send_inputs")
def send_inputs(
    inputs: str,
    noise_level: float,
    is_negative: str,
    steps: int = 20,
    cfg_scale: float = 4.5,
    seed: Optional[int] = None
):
    try:
        generated_image = generate_image(inputs, is_negative, steps, cfg_scale, seed)
        if generated_image is not None:
            # Encode the PIL image as base64 so it can be returned inside a JSON response.
            image_bytes = io.BytesIO()
            generated_image.save(image_bytes, format="JPEG")
            image_base64 = base64.b64encode(image_bytes.getvalue()).decode("utf-8")
            return {"image_base64": image_base64}
        else:
            raise HTTPException(status_code=500, detail="Failed to generate image.")
    except Exception as e:
        print(f"Error generating image: {e}")
        raise HTTPException(status_code=500, detail="Failed to generate image.")
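
# Illustrative client call (a sketch, not part of the app). It assumes the service
# is reachable at http://localhost:7860 and decodes the base64 payload returned by
# /send_inputs back into JPEG bytes:
#
#     import base64, requests
#
#     resp = requests.get(
#         "http://localhost:7860/send_inputs",
#         params={"inputs": "a lighthouse at dusk", "noise_level": 0.0, "is_negative": ""},
#     )
#     resp.raise_for_status()
#     with open("output.jpg", "wb") as f:
#         f.write(base64.b64decode(resp.json()["image_base64"]))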


app.mount("/", StaticFiles(directory="static", html=True), name="static")


@app.get("/")
def index() -> FileResponse:
    return FileResponse(path="/app/static/index.html", media_type="text/html")
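
# The app is typically served with uvicorn, e.g.:
#     uvicorn app:app --host 0.0.0.0 --port 7860
# The module name "app" and port 7860 are assumptions; adjust them to match the
# actual file name and deployment (7860 is the conventional Hugging Face Spaces port).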