Ashrafb committed on
Commit
7ced8ee
โ€ข
1 Parent(s): e692c7f

Upload main (4).py

Browse files
Files changed (1) hide show
  1. main (4).py +141 -0
main (4).py ADDED
@@ -0,0 +1,141 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import base64
import io
import os
import random
import string
import sys
import time
from queue import Empty, Queue
from threading import Thread

import requests
from fastapi import FastAPI
from fastapi.responses import FileResponse
from fastapi.staticfiles import StaticFiles
from PIL import Image

import gradio as gr
16
+
17
# FastAPI application instance that hosts the image-generation endpoints.
app = FastAPI()

# Hugging Face Inference API endpoint for the Playground v2 aesthetic model.
API_URL = "https://api-inference.huggingface.co/models/playgroundai/playground-v2-1024px-aesthetic"
# Read-only HF token pulled from the environment (a free-tier token works).
API_TOKEN = os.getenv("HF_READ_TOKEN")
headers = {"Authorization": "Bearer " + str(API_TOKEN)}
22
def generate_image(prompt):
    """Generate an image for *prompt* via the HF Inference API.

    Args:
        prompt: Text prompt forwarded to the hosted model.

    Returns:
        A ``PIL.Image.Image`` on success, or ``None`` when the request or
        image decoding fails (callers must handle the ``None`` case).
    """
    try:
        payload = {"inputs": prompt}
        # BUGFIX: a timeout prevents a stuck upstream call from hanging
        # this endpoint forever (requests has no default timeout).
        response = requests.post(API_URL, headers=headers, json=payload, timeout=60)
        response.raise_for_status()  # surface HTTP errors as exceptions
        image = Image.open(io.BytesIO(response.content))
        return image
    except (requests.RequestException, OSError) as e:
        # Narrowed from bare Exception: RequestException covers network/HTTP
        # failures, OSError covers PIL's UnidentifiedImageError on bad bytes.
        # Best-effort contract preserved: log and signal failure with None.
        print(f"Error generating image: {e}")
        return None
34
+
35
+
36
+
37
+
38
+
39
# Remote Gradio interfaces: one expands a seed phrase into a richer prompt,
# the other renders a prompt into an image.
text_gen = gr.Interface.load("models/Gustavosta/MagicPrompt-Stable-Diffusion")
proc1 = gr.Interface.load("models/playgroundai/playground-v2-1024px-aesthetic")

# Simple back-pressure mechanism shared by the request handlers below.
queue = Queue()
queue_threshold = 100
44
+
45
def add_random_noise(prompt, noise_level=0.00):
    """Randomly replace a fraction of characters in *prompt* with noise.

    Args:
        prompt: Input text to perturb.
        noise_level: Noise intensity; the fraction of characters replaced
            is ``noise_level * 5 / 100``.  0 (the default) leaves the
            prompt unchanged.  Negative values are treated as 0.

    Returns:
        The prompt with that many characters swapped for random letters,
        punctuation, digits, or emoji (length is always preserved).
    """
    # BUGFIX: clamp instead of the original dead `if noise_level == 0:
    # noise_level = 0.00` no-op — a negative level would make
    # random.sample raise ValueError on a negative sample size.
    noise_level = max(noise_level, 0)
    percentage_noise = noise_level * 5
    num_noise_chars = int(len(prompt) * (percentage_noise / 100))
    noise_indices = random.sample(range(len(prompt)), num_noise_chars)
    prompt_list = list(prompt)
    noise_chars = list(string.ascii_letters + string.punctuation + ' ' + string.digits)
    # NOTE(review): the original emoji list was mojibake-garbled in extraction;
    # reconstructed here — confirm against the deployed file.
    noise_chars.extend(['😍', '💩', '😂', '🤔', '😊', '🤗', '😭', '🙄', '😷', '🤯', '🤫', '🥴', '😴', '🤩', '🥳', '😔', '😩', '🤪', '😇', '🤢', '😈', '👹', '👻', '🤖', '👽', '💀', '🎃', '🎅', '🎄', '🎁', '🎂', '🎉', '🎈', '🎊', '🎮', '❤️', '💔', '💕', '💖', '💗', '🐶', '🐱', '🐭', '🐹', '🦊', '🐻', '🐨', '🐯', '🦁', '🐘', '🔥', '🌧️', '🌞', '🌈', '💥', '🌴', '🌊', '🌺', '🌻', '🌸', '🎨', '🌅', '🌌', '☁️', '⛈️', '❄️', '☀️', '🌤️', '⛅️', '🌥️', '🌦️', '🌧️', '🌩️', '🌨️', '🌫️', '☔️', '🌬️', '💨', '🌪️', '🌈'])
    for index in noise_indices:
        prompt_list[index] = random.choice(noise_chars)
    return "".join(prompt_list)
57
+
58
# uuid is kept for parity with the original module even though the
# timestamp-based de-duplication below does not currently use it.
import uuid

# Global counter appended to prompts so repeated requests stay unique.
request_counter = 0
67
+
68
def send_it1(inputs, noise_level, proc=proc1):
    """Run *proc* on a noised, timestamp-tagged version of *inputs*.

    Args:
        inputs: Base prompt text.
        noise_level: Passed through to ``add_random_noise``.
        proc: Callable that turns a prompt into output (defaults to the
            module-level ``proc1`` interface).

    Returns:
        Whatever *proc* returns for the perturbed prompt.
    """
    global request_counter
    request_counter += 1
    # Unique suffix defeats upstream caching of identical prompts.
    timestamp = f"{time.time()}_{request_counter}"
    prompt_with_noise = add_random_noise(inputs, noise_level) + f" - {timestamp}"
    # Back-pressure: wait while too many requests are in flight.
    while queue.qsize() >= queue_threshold:
        time.sleep(2)
    queue.put(prompt_with_noise)
    try:
        return proc(prompt_with_noise)
    finally:
        # BUGFIX: nothing else in this module ever drains the queue, so
        # without removing our entry the threshold loop above would block
        # every caller forever once queue_threshold requests had been made.
        try:
            queue.get_nowait()
        except Empty:
            pass
78
+
79
+
80
def get_prompts(prompt_text):
    """Expand *prompt_text* into a styled prompt via the MagicPrompt model.

    Args:
        prompt_text: User-supplied seed text.

    Returns:
        The generated prompt string, or a bilingual error message when the
        input is empty.
    """
    if not prompt_text:
        # Original behavior: return (not raise) the bilingual message.
        # The `raise gr.Error(...)` that followed this return in the
        # original was unreachable dead code and has been removed.
        return "Please enter text before generating prompts.رجاء ادخل النص اولا"

    global request_counter
    request_counter += 1
    # Unique suffix so identical seeds still produce fresh generations.
    timestamp = f"{time.time()}_{request_counter}"

    options = [
        "Cyberpunk android",
        "2060",
        "newyork",
        # BUGFIX: the trailing comma after the "acrylic palette knife" entry
        # was missing, which silently concatenated it with the "Editorial
        # Photography" option via implicit string concatenation.
        "style of laurie greasley", "studio ghibli", "akira toriyama", "james gilleard", "genshin impact", "trending pixiv fanbox", "acrylic palette knife, 4k, vibrant colors, devinart, trending on artstation, low details",
        "Editorial Photography, Shot on 70mm lens, Depth of Field, Bokeh, DOF, Tilt Blur, Shutter Speed 1/1000, F/22, 32k, Super-Resolution, award winning,",
        "high detail, warm lighting, godrays, vivid, beautiful, trending on artstation, by jordan grimmer, huge scene, grass, art greg rutkowski ",
        "highly detailed, digital painting, artstation, illustration, art by artgerm and greg rutkowski and alphonse mucha.",
        "Charlie Bowater, stanley artgerm lau, a character portrait, sots art, sharp focus, smooth, aesthetic, extremely detailed, octane render,solo, dark industrial background, rtx, rock clothes, cinematic light, intricate detail, highly detailed, high res, detailed facial features",
        "portrait photograph", "realistic", "concept art", "elegant, highly detailed", "intricate, sharp focus, depth of field, f/1. 8, 85mm, medium shot, mid shot, (((professionally color graded)))", " sharp focus, bright soft diffused light", "(volumetric fog),",
        "Cinematic film still", " (dark city street:1.2)", "(cold colors), damp, moist, intricate details", "shallow depth of field, [volumetric fog]", "cinematic lighting, reflections, photographed on a Canon EOS R5, 50mm lens, F/2.8, HDR, 8k resolution", "cinematic film still from cyberpunk movie", "volumetric fog, (RAW, analog, masterpiece, best quality, soft particles, 8k, flawless perfect face, intricate details", "trending on artstation, trending on cgsociety, dlsr, ultra sharp, hdr, rtx, antialiasing, canon 5d foto))", "((skin details, high detailed skin texture))", "(((perfect face))), (perfect eyes)))",
        # Add other prompt options here...
    ]

    # prompt_text is guaranteed non-empty here; the original's unreachable
    # `else: text_gen("", timestamp)` branch (wrong arity, too) was removed.
    chosen_option = random.choice(options)
    return text_gen(f"{prompt_text}, {chosen_option} - {timestamp}")
109
+
110
+ @app.get("/generate_prompts")
111
+ def generate_prompts(prompt_text: str):
112
+ return get_prompts(prompt_text)
113
+
114
+
115
+
116
+ @app.get("/send_inputs")
117
+ def send_inputs(inputs: str, noise_level: float):
118
+ try:
119
+ generated_image = generate_image(inputs)
120
+ if generated_image is not None:
121
+ image_bytes = io.BytesIO()
122
+ generated_image.save(image_bytes, format="JPEG")
123
+ image_base64 = base64.b64encode(image_bytes.getvalue()).decode("utf-8")
124
+ return {"image_base64": image_base64}
125
+ else:
126
+ # Return an error message if the image couldn't be generated
127
+ return {"error": "Failed to generate image."}
128
+ except Exception as e:
129
+ # Log the error and return an error message
130
+ print(f"Error generating image: {e}")
131
+ return {"error": "Failed to generate image."}
132
+
133
+
134
+
135
@app.get("/")
def index() -> FileResponse:
    """Serve the site entry page.

    NOTE(review): the absolute path assumes an /app/static container
    layout — confirm it matches the deployment image.
    """
    return FileResponse(path="/app/static/index.html", media_type="text/html")


# BUGFIX: the catch-all static mount must be registered AFTER the explicit
# routes — Starlette matches in registration order, so mounting "/" first
# (as the original did) made the index() route above unreachable.
app.mount("/", StaticFiles(directory="static", html=True), name="static")
140
+
141
+