from fastapi import FastAPI, HTTPException
from fastapi.staticfiles import StaticFiles
from fastapi.responses import FileResponse
import gradio as gr
import os
import sys
import random
import string
import time
from typing import Optional
from queue import Queue
from threading import Thread
import requests
import io
from PIL import Image
import base64
from deep_translator import GoogleTranslator

app = FastAPI()

API_URL = "https://api-inference.huggingface.co/models/playgroundai/playground-v2.5-1024px-aesthetic"
API_TOKEN = os.getenv("HF_READ_TOKEN")  # free-tier read token, supplied as a Space secret
headers = {"Authorization": f"Bearer {API_TOKEN}"}





# Prompt extender and image model, loaded through Gradio's model loader
text_gen = gr.Interface.load("models/Gustavosta/MagicPrompt-Stable-Diffusion")
proc1 = gr.Interface.load("models/playgroundai/playground-v2-1024px-aesthetic")

# In-memory queue used to throttle concurrent generation requests
queue = Queue()
queue_threshold = 100

def add_random_noise(prompt, noise_level=0.00):
    """Swap a small percentage of the prompt's characters for random characters/emoji.

    noise_level scales the corruption: each unit adds roughly 5% noisy characters.
    """
    percentage_noise = noise_level * 5
    num_noise_chars = int(len(prompt) * (percentage_noise / 100))
    noise_indices = random.sample(range(len(prompt)), num_noise_chars)
    prompt_list = list(prompt)
    noise_chars = list(string.ascii_letters + string.punctuation + ' ' + string.digits)
    noise_chars.extend(['😍', '💩', '😂', '🤔', '😊', '🤗', '😭', '🙄', '😷', '🤯', '🤫', '🥴', '😴', '🤩', '🥳', '😔', '😩', '🤪', '😇', '🤢', '😈', '👹', '👻', '🤖', '👽', '💀', '🎃', '🎅', '🎄', '🎁', '🎂', '🎉', '🎈', '🎊', '🎮', '❤️', '💔', '💕', '💖', '💗', '🐶', '🐱', '🐭', '🐹', '🦊', '🐻', '🐨', '🐯', '🦁', '🐘', '🔥', '🌧️', '🌞', '🌈', '💥', '🌴', '🌊', '🌺', '🌻', '🌸', '🎨', '🌅', '🌌', '☁️', '⛈️', '❄️', '☀️', '🌤️', '⛅️', '🌥️', '🌦️', '🌧️', '🌩️', '🌨️', '🌫️', '☔️', '🌬️', '💨', '🌪️', '🌈'])
    for index in noise_indices:
        prompt_list[index] = random.choice(noise_chars)
    return "".join(prompt_list)

request_counter = 0  # Global counter used to make each generated prompt unique (simple cache-busting)

def send_it1(inputs, noise_level, proc=proc1):
    """Generate an image through the Gradio-loaded pipeline, salting the prompt so repeats are not cached."""
    global request_counter
    request_counter += 1
    timestamp = f"{time.time()}_{request_counter}"
    prompt_with_noise = add_random_noise(inputs, noise_level) + f" - {timestamp}"
    # Crude back-pressure: wait while the queue is saturated
    while queue.qsize() >= queue_threshold:
        time.sleep(2)
    queue.put(prompt_with_noise)
    output = proc(prompt_with_noise)
    return output
    
def generate_image(inputs, is_negative, steps, cfg_scale, seed):
    """Call the Hugging Face Inference API and return a PIL image, or None on failure."""
    try:
        global request_counter
        request_counter += 1
        timestamp = f"{time.time()}_{request_counter}"

        # Translate the prompt to English so non-English input still works with the model
        translator_to_en = GoogleTranslator(source='auto', target='english')
        english_inputs = translator_to_en.translate(inputs)

        # Salt the prompt with a timestamp/counter so identical requests are not served from cache
        prompt_with_noise = add_random_noise(english_inputs) + f" - {timestamp}"
        payload = {
            "inputs": prompt_with_noise,
            "is_negative": is_negative,
            "steps": steps,
            "cfg_scale": cfg_scale,
            "seed": seed if seed is not None else random.randint(-1, 2147483647)
        }
        response = requests.post(API_URL, headers=headers, json=payload)
        response.raise_for_status()  # Raise an exception for HTTP errors
        image_bytes = response.content
        image = Image.open(io.BytesIO(image_bytes))
        return image
    except requests.exceptions.HTTPError as e:
        # HTTP errors from the Inference API (rate limits, model loading, bad token, ...)
        print(f"HTTP Error: {e}")
        return None
    except Exception as e:
        # Any other failure (network, translation, image decoding, ...)
        print(f"Error generating image: {e}")
        return None




def get_prompts(prompt_text):
    """Extend a user prompt with a randomly chosen style snippet and run it through MagicPrompt."""
    if not prompt_text:
        return "Please enter text before generating prompts."

    global request_counter
    request_counter += 1
    timestamp = f"{time.time()}_{request_counter}"

    options = [
        "Cyberpunk android",
        "2060",
        "newyork",
        "style of laurie greasley", "studio ghibli", "akira toriyama", "james gilleard", "genshin impact", "trending pixiv fanbox",
        "acrylic palette knife, 4k, vibrant colors, devinart, trending on artstation, low details",
        "Editorial Photography, Shot on 70mm lens, Depth of Field, Bokeh, DOF, Tilt Blur, Shutter Speed 1/1000, F/22,  32k, Super-Resolution, award winning,",
        "high detail, warm lighting, godrays, vivid, beautiful, trending on artstation, by jordan grimmer, huge scene, grass, art greg rutkowski ",
        "highly detailed, digital painting, artstation, illustration, art by artgerm and greg rutkowski and alphonse mucha.",
        "Charlie Bowater, stanley artgerm lau, a character portrait, sots art, sharp focus, smooth, aesthetic, extremely detailed, octane render, solo, dark industrial background, rtx, rock clothes, cinematic light, intricate detail, highly detailed, high res, detailed facial features",
        "portrait photograph", "realistic", "concept art", "elegant, highly detailed", "intricate, sharp focus, depth of field, f/1. 8, 85mm, medium shot, mid shot, (((professionally color graded)))", "sharp focus, bright soft diffused light", "(volumetric fog),",
        "Cinematic film still", "(dark city street:1.2)", "(cold colors), damp, moist, intricate details", "shallow depth of field, [volumetric fog]", "cinematic lighting, reflections, photographed on a Canon EOS R5, 50mm lens, F/2.8, HDR, 8k resolution", "cinematic film still from cyberpunk movie", "volumetric fog,  (RAW, analog, masterpiece, best quality, soft particles, 8k, flawless perfect face, intricate details", "trending on artstation, trending on cgsociety, dlsr, ultra sharp, hdr, rtx, antialiasing, canon 5d foto))", "((skin details, high detailed skin texture))", "(((perfect face))), (perfect eyes)))",
        # Add other prompt options here...
    ]

    chosen_option = random.choice(options)
    return text_gen(f"{prompt_text}, {chosen_option} - {timestamp}")

def initialize_api_connection():
    """Refresh the Inference API auth headers from the HF_READ_TOKEN environment variable."""
    global headers
    API_TOKEN = os.getenv("HF_READ_TOKEN")  # free-tier read token
    headers = {"Authorization": f"Bearer {API_TOKEN}"}

# Run initialization functions on startup
initialize_api_connection()

@app.get("/generate_prompts")
def generate_prompts(prompt_text: str):
    return get_prompts(prompt_text)
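# Example request (hypothetical host/port, typical for a local run):
# curl "http://localhost:7860/generate_prompts?prompt_text=cyberpunk%20city"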




@app.get("/send_inputs")
def send_inputs(
    inputs: str,
    noise_level: float,
    is_negative: str,
    steps: int = 20,
    cfg_scale: int = 4.5,
    seed: int = None
):
    try:
        generated_image = generate_image(inputs, is_negative, steps, cfg_scale, seed)
        if generated_image is not None:
            image_bytes = io.BytesIO()
            generated_image.save(image_bytes, format="JPEG")
            image_base64 = base64.b64encode(image_bytes.getvalue()).decode("utf-8")
            return {"image_base64": image_base64}
        else:
            # Return an error message if the image couldn't be generated
            raise HTTPException(status_code=500, detail="Failed to generate image.")
    except Exception as e:
        # Log the error and return an error message
        print(f"Error generating image: {e}")
        raise HTTPException(status_code=500, detail="Failed to generate image.")
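# Example client call (hypothetical host/port; the JPEG comes back base64-encoded in JSON):
# curl "http://localhost:7860/send_inputs?inputs=a%20red%20bicycle&noise_level=0&is_negative=blurry&steps=20&cfg_scale=4.5"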


# Serve the static frontend; with html=True the mount answers "/" with static/index.html
app.mount("/", StaticFiles(directory="static", html=True), name="static")

@app.get("/")
def index() -> FileResponse:
    return FileResponse(path="/app/static/index.html", media_type="text/html")
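# Minimal local-run entry point (a sketch, not part of the original deployment: on
# Hugging Face Spaces the platform normally launches the server; assumes uvicorn is
# installed and port 7860 is free).
if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=7860)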