import os
import sys
import random
import string
import time
from queue import Queue
from threading import Thread

import gradio as gr
from transformers import MBartForConditionalGeneration, MBart50TokenizerFast

# Local mBART-50 many-to-one model used to translate prompts into English.
model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-50-many-to-one-mmt")
tokenizer = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50-many-to-one-mmt")

# Hosted inference endpoints loaded through the Gradio Interface API.
# text_gen is the remote counterpart of the translation model (translation below runs locally);
# proc1 is the text-to-image model used for generation.
API_URL = "models/facebook/mbart-large-50-many-to-one-mmt"
API_TOKEN = os.environ.get("HF_READ_TOKEN")
headers = {"Authorization": f"Bearer {API_TOKEN}"}
text_gen = gr.Interface.load(API_URL, headers=headers)
proc1 = gr.Interface.load("models/openskyml/midjourney-v4-xl")


def restart_script_periodically():
    """Restart the whole process every 9-10 minutes to clear accumulated state."""
    while True:
        random_time = random.randint(540, 600)
        time.sleep(random_time)
        os.execl(sys.executable, sys.executable, *sys.argv)


restart_thread = Thread(target=restart_script_periodically, daemon=True)
restart_thread.start()

queue = Queue()
queue_threshold = 100


def add_random_noise(prompt, noise_level=0.0):
    """Replace a small fraction of the prompt's characters with random noise.

    This keeps otherwise identical prompts from returning cached results, so
    repeated generations produce different images.
    """
    if noise_level <= 0:
        return prompt
    percentage_noise = noise_level * 5
    num_noise_chars = int(len(prompt) * (percentage_noise / 100))
    noise_indices = random.sample(range(len(prompt)), num_noise_chars)
    prompt_list = list(prompt)
    noise_chars = list(string.ascii_letters + string.punctuation + ' ' + string.digits)
    noise_chars.extend(['😍', '💩', '😂', '🤔', '😊', '🤗', '😭', '🙄', '😷', '🤯', '🤫', '🥴', '😴',
                        '🤩', '🥳', '😔', '😩', '🤪', '😇', '🤢', '😈', '👹', '👻', '🤖', '👽', '💀',
                        '🎃', '🎅', '🎄', '🎁', '🎂', '🎉', '🎈', '🎊', '🎮', '❤️', '💔', '💕', '💖',
                        '💗', '🐶', '🐱', '🐭', '🐹', '🦊', '🐻', '🐨', '🐯', '🦁', '🐘', '🔥', '🌧️',
                        '🌞', '🌈', '💥', '🌴', '🌊', '🌚', '🌻', '🌸', '🎨', '🌅', '🌌', '☁️', '⛈️',
                        '❄️', '☀️', '🌤️', '⛅️', '🌥️', '🌦️', '🌧️', '🌩️', '🌨️', '🌫️', '☔️',
                        '🌎️', '💨', '🌊️', '🌈'])
    for index in noise_indices:
        prompt_list[index] = random.choice(noise_chars)
    return "".join(prompt_list)


request_counter = 0  # Global counter used to make every request unique


def send_it1(inputs, noise_level, proc=proc1):
    """Send a noised, time-stamped prompt to the image model and return the generated image."""
    global request_counter
    request_counter += 1
    timestamp = f"{time.time()}_{request_counter}"
    prompt_with_noise = add_random_noise(inputs, noise_level) + f" - {timestamp}"
    try:
        # Simple back-pressure: wait while too many requests are already in flight.
        while queue.qsize() >= queue_threshold:
            time.sleep(2)
        queue.put(prompt_with_noise)
        return proc(prompt_with_noise)
    except Exception:
        # Surface a generic error message instead of the raw exception.
        raise gr.Error("Experiencing high demand. Please retry shortly. Thank you for your patience.")
    finally:
        # Drain this request from the queue so the threshold is never hit permanently.
        if not queue.empty():
            queue.get_nowait()


def get_prompts(prompt_text):
    """Translate an Arabic prompt into English with mBART-50 for use as a full image prompt."""
    if not prompt_text:
        return "Please enter text before generating prompts."
    global request_counter
    request_counter += 1
    tokenizer.src_lang = "ar_AR"
    encoded_ar = tokenizer(prompt_text, return_tensors="pt")
    generated_tokens = model.generate(**encoded_ar)
    translated_text = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)[0]
    return translated_text

with gr.Blocks(css=".gradio-container {background-color: #F5F5F5;} "
                   ".dark .gradio-container {background-image: linear-gradient(to top, #09203f 0%, #537895 100%);} "
                   "footer {display: none !important;}") as demo:
    with gr.Column(elem_id="col-container"):
        with gr.Row(variant="compact"):
            input_text = gr.Textbox(
                lines=8,
                label="Short Prompt",
                show_label=False,
                max_lines=10,
                placeholder="Enter a basic idea and click 'Magic Prompt'. Got no ideas? No problem, simply hit the magic button!",
            ).style(container=False)
            see_prompts = gr.Button("✨ Magic Prompt ✨").style(full_width=False)

        with gr.Row(variant="compact"):
            prompt = gr.Textbox(
                lines=8,
                label="Enter your prompt",
                show_label=False,
                max_lines=10,
                placeholder="Full Prompt",
            ).style(container=False)
            run = gr.Button("Generate Images").style(full_width=False)

        with gr.Row():
            noise_level = gr.Slider(minimum=0.0, maximum=3.0, step=0.1, label="Noise Level")

        with gr.Row():
            output1 = gr.Image(label="Midjourney v4 XL", show_label=False, show_share_button=False)
            output2 = gr.Image(label="Midjourney v4 XL", show_label=False, show_share_button=False)

    see_prompts.click(get_prompts, inputs=[input_text], outputs=[prompt], queue=False)
    run.click(send_it1, inputs=[prompt, noise_level], outputs=[output1])
    run.click(send_it1, inputs=[prompt, noise_level], outputs=[output2])

demo.launch(enable_queue=True, inline=True)