|
import asyncio
import json
import os
import random
import threading
import time

import discord
import gradio as gr
import gradio_client
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import requests
from gradio_client import Client
from PIL import Image
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Hugging Face token for calling the Spaces below. May be None when the
# env var is unset; the Gradio clients then connect anonymously.
DFIF_TOKEN = os.getenv('DFIF_TOKEN')


# Client for the DeepFloyd/IF Space (stage-1 64px generation endpoint is
# used in MyClient.on_message). NOTE(review): the second positional
# argument is presumably gradio_client.Client's hf_token parameter —
# confirm against the installed gradio_client version.
df = Client("DeepFloyd/IF", DFIF_TOKEN)


# Client for the latent-upscaler Space. NOTE(review): created but never
# used in this file — looks like it was intended for a stage-2 upscale
# step (see the disabled code further down); verify before removing.
sdlu = Client("huggingface-projects/stable-diffusion-latent-upscaler", DFIF_TOKEN)
|
|
|
|
|
class MyClient(discord.Client):
    """Discord bot that generates images via the DeepFloyd/IF Gradio Space.

    Listens for the ``!deepfloydif <prompt>`` command in an allow-listed
    channel, calls the module-level ``df`` Gradio client for four stage-1
    64px images, and replies with a single 2x2 grid image.
    """

    # Server-specific gate IDs (moved out of on_message for visibility).
    OFFLINE_ROLE_ID = 1103676632667017266    # bot is "offline" while it holds this role
    REQUIRED_ROLE_ID = 897376942817419265    # users must hold this role to use the command
    ALLOWED_CHANNEL_IDS = (1100458786826747945,)

    async def on_ready(self):
        """Log the bot identity once the gateway connection is ready."""
        print('Logged on as', self.user)

    async def on_message(self, message):
        """Handle ``!deepfloydif <prompt>``.

        Silently ignores messages that fail any gate: sent by the bot
        itself, sent outside a guild (DMs), sent while the bot holds the
        offline role, sent by a user lacking the required role, or sent
        in a non-allow-listed channel.
        """
        # Never react to our own messages (avoids reply loops).
        if message.author == self.user:
            return

        # Fix: DMs have no guild — the role checks below would raise
        # AttributeError on message.guild / message.author.roles.
        if message.guild is None:
            return

        # The bot can be taken "offline" by assigning it a marker role.
        # Fix: get_member can return None (member not cached) — guard it.
        bot_member = message.guild.get_member(self.user.id)
        if bot_member is None or any(
            role.id == self.OFFLINE_ROLE_ID for role in bot_member.roles
        ):
            return

        # Only users holding the required role may trigger generation.
        if not any(role.id == self.REQUIRED_ROLE_ID for role in message.author.roles):
            return

        # Restrict the command to the allow-listed channel(s).
        if message.channel.id not in self.ALLOWED_CHANNEL_IDS:
            return

        if message.content.startswith('!deepfloydif'):
            prompt = message.content[len('!deepfloydif'):].strip()
            number_of_images = 4

            # Fix: the original reseeded the *global* RNG with
            # int(time.time()), so requests within the same second got the
            # same seed. A plain randint is sufficient here.
            seed = random.randint(0, 2**32 - 1)

            # Fix: df.predict is a blocking HTTP call; running it directly
            # in this async handler starves the Discord heartbeat. Run it
            # on a worker thread instead.
            # predict(prompt, negative_prompt, seed, batch, guidance_scale,
            #         custom_timesteps, inference_steps) -> (dir, params, path)
            stage_1_results, stage_1_param_path, stage_1_result_path = await asyncio.to_thread(
                df.predict,
                prompt, "blur", seed, number_of_images, 7.0, 'smart100', 50,
                api_name="/generate64",
            )

            # Sort for a deterministic grid layout — os.listdir order is
            # filesystem-dependent.
            png_files = sorted(
                f for f in os.listdir(stage_1_results) if f.endswith('.png')
            )

            # Fix: the original indexed png_files[0..3] unconditionally and
            # crashed with IndexError when fewer images came back.
            if len(png_files) < 4:
                return

            images = [
                Image.open(os.path.join(stage_1_results, name))
                for name in png_files[:4]
            ]

            # Compose the four results into one 2x2 grid
            # ([0][1] on top, [2][3] below).
            tile_w, tile_h = images[0].width, images[0].height
            combined_image = Image.new('RGB', (tile_w * 2, tile_h * 2))
            for idx, img in enumerate(images):
                combined_image.paste(img, ((idx % 2) * tile_w, (idx // 2) * tile_h))

            combined_image_path = os.path.join(stage_1_results, 'combined_image.png')
            combined_image.save(combined_image_path)

            with open(combined_image_path, 'rb') as f:
                await message.reply('Here is the combined image',
                                    file=discord.File(f, 'combined_image.png'))

            # NOTE(review): a stage-2 upscale step (df "/upscale256" with
            # stage_1_result_path, presumably via the unused `sdlu` client)
            # was present here as commented-out code; removed as dead code.
|
|
|
|
|
|
|
# Discord gateway token; stored as a secret in the environment.
DISCORD_TOKEN = os.environ.get("GRADIOTEST_TOKEN", None)

# message_content is a privileged intent — it must also be enabled for the
# bot in the Discord developer portal or command text arrives empty.
intents = discord.Intents.default()
intents.message_content = True
client = MyClient(intents=intents)


def run_bot():
    """Run the Discord client (blocking); executed on a worker thread so
    the Gradio interface below can own the main thread."""
    # Fix: fail fast with a clear message — client.run(None) would
    # otherwise raise an opaque error inside this background thread.
    if not DISCORD_TOKEN:
        raise RuntimeError("GRADIOTEST_TOKEN environment variable is not set")
    client.run(DISCORD_TOKEN)


threading.Thread(target=run_bot).start()
|
|
|
def greet(name):
    """Return a greeting string for *name*."""
    return f"Hello {name}!"
|
|
|
# Minimal Gradio UI. Its real purpose (e.g. on a Hugging Face Space) is to
# keep the process alive — launch() blocks the main thread while the
# Discord bot runs on the background thread started above.
demo = gr.Interface(fn=greet, inputs="text", outputs="text")
demo.launch()