Rooni committed on
Commit 84e410f
1 Parent(s): 0016ec4

Update app.py

Files changed (1)
  1. app.py +15 -42
app.py CHANGED
@@ -1,6 +1,6 @@
 import gradio as gr
-import requests
-import io
+import torch
+from diffusers import DiffusionPipeline, EulerDiscreteScheduler
 import random
 import os
 from PIL import Image
@@ -8,11 +8,12 @@ from deep_translator import GoogleTranslator
 import json
 from langdetect import detect
 
-api_base = "https://api-inference.huggingface.co/models/"
-model = "cagliostrolab/animagine-xl-3.1"  # Animagine XL 3.1 model
-timeout = 100
+model_id = "cagliostrolab/animagine-xl-3.1"
+pipe = DiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16, revision="main")
+pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
+pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")
 
-def query(prompt, is_negative=False, steps=30, cfg_scale=7, sampler="DPM++ 2M Karras", seed=-1, gpt=False):
+def query(prompt, is_negative=False, steps=30, cfg_scale=7, seed=-1, gpt=False):
     if prompt == "" or prompt == None:
         return None
 
@@ -52,8 +53,6 @@ def query(prompt, is_negative=False, steps=30, cfg_scale=7, sampler="DPM++ 2M Karras", seed=-1, gpt=False):
     else:
         print(f"Error: {response.status_code} - {response.text}")
 
-    API_TOKEN = random.choice([os.getenv("HF_READ_TOKEN"), os.getenv("HF_READ_TOKEN_2"), os.getenv("HF_READ_TOKEN_3"), os.getenv("HF_READ_TOKEN_4"), os.getenv("HF_READ_TOKEN_5")])
-    headers = {"Authorization": f"Bearer {API_TOKEN}"}
     language = detect(prompt)
 
     if language != 'en':
@@ -63,37 +62,13 @@ def query(prompt, is_negative=False, steps=30, cfg_scale=7, sampler="DPM++ 2M Karras", seed=-1, gpt=False):
     prompt = f"{prompt} | ultra detail, ultra elaboration, ultra quality, perfect."
     print(f'\033[1mGenerating {key}:\033[0m {prompt}')
 
-    API_URL = model
-
-    payload = {
-        "inputs": prompt,
-        "is_negative": is_negative,
-        "steps": steps,
-        "cfg_scale": cfg_scale,
-        "seed": seed if seed != -1 else random.randint(1, 1000000000),
-        "guidance_scale": cfg_scale,
-        "num_inference_steps": steps,
-        "negative_prompt": is_negative
-    }
-
-    response = requests.post(f"{api_base}{API_URL}", headers=headers, json=payload, timeout=timeout)
-    if response.status_code != 200:
-        print(f"Error: failed to fetch the image. Response status: {response.status_code}")
-        print(f"Response content: {response.text}")
-        if response.status_code == 503:
-            raise gr.Error(f"{response.status_code} : The model is being loaded")
-            return None
-        raise gr.Error(f"{response.status_code}")
-        return None
-
-    try:
-        image_bytes = response.content
-        image = Image.open(io.BytesIO(image_bytes))
-        print(f'\033[1mGeneration {key} finished!\033[0m ({prompt})')
-        return image
-    except Exception as e:
-        print(f"Error while trying to open the image: {e}")
-        return None
+    if seed == -1:
+        seed = random.randint(1, 1000000000)
+
+    generator = torch.Generator("cuda" if torch.cuda.is_available() else "cpu").manual_seed(seed)
+    image = pipe(prompt, negative_prompt=is_negative, guidance_scale=cfg_scale, num_inference_steps=steps, generator=generator).images[0]
+    print(f'\033[1mGeneration {key} finished!\033[0m ({prompt})')
+    return image
 
 css = """
 * {}
@@ -113,8 +88,6 @@ with gr.Blocks(css=css) as dalle:
             steps = gr.Slider(label="Sampling steps", value=35, minimum=1, maximum=70, step=1)
         with gr.Row():
            cfg = gr.Slider(label="CFG Scale", value=7, minimum=1, maximum=20, step=0.1)
-        with gr.Row():
-            method = gr.Radio(label="Sampling method", value="Euler a", choices=["DPM++ 2M Karras", "DPM++ SDE Karras", "Euler", "Euler a", "Heun", "DDIM"])
         with gr.Row():
             seed = gr.Slider(label="Seed", value=-1, minimum=-1, maximum=1000000000, step=1)
         with gr.Row():
@@ -131,6 +104,6 @@ with gr.Blocks(css=css) as dalle:
        with gr.Row():
            image_output = gr.Image(type="pil", label="Image", elem_id="gallery")
 
-    text_button.click(query, inputs=[text_prompt, negative_prompt, steps, cfg, method, seed, gpt], outputs=image_output)
+    text_button.click(query, inputs=[text_prompt, negative_prompt, steps, cfg, seed, gpt], outputs=image_output)
 
-dalle.queue(max_size=200).launch(show_api=False, share=False)
+dalle.queue(max_size=100).launch(show_api=False, share=False)
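
For reference, the generation path introduced by this commit reduces to the standalone sketch below. It assumes a machine with torch and diffusers installed (ideally with a CUDA GPU, since the weights are loaded in float16); the prompt, negative prompt, and seed values are illustrative, not part of the commit.

import random

import torch
from diffusers import DiffusionPipeline, EulerDiscreteScheduler

# Load Animagine XL 3.1 once at startup, as the updated app.py does.
pipe = DiffusionPipeline.from_pretrained(
    "cagliostrolab/animagine-xl-3.1", torch_dtype=torch.float16
)
pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
device = "cuda" if torch.cuda.is_available() else "cpu"
pipe = pipe.to(device)

# Seed handling mirrors the diff: -1 means "pick a random seed".
seed = random.randint(1, 1000000000)
generator = torch.Generator(device).manual_seed(seed)

image = pipe(
    "1girl, solo, looking at viewer",      # illustrative prompt
    negative_prompt="lowres, bad anatomy", # illustrative negative prompt
    guidance_scale=7,
    num_inference_steps=30,
    generator=generator,
).images[0]
image.save("out.png")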
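
The "Sampling method" radio disappears because the scheduler is now fixed to Euler when the pipeline is loaded. If that control were to be kept, a mapping along these lines could translate the old radio labels into diffusers schedulers; the SAMPLERS table and set_sampler helper are hypothetical, not part of the commit, and the mapping covers only some of the removed choices.

from diffusers import (
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    HeunDiscreteScheduler,
)

# Hypothetical mapping from the removed radio's labels to diffusers schedulers.
SAMPLERS = {
    "DPM++ 2M Karras": lambda cfg: DPMSolverMultistepScheduler.from_config(cfg, use_karras_sigmas=True),
    "Euler": lambda cfg: EulerDiscreteScheduler.from_config(cfg),
    "Euler a": lambda cfg: EulerAncestralDiscreteScheduler.from_config(cfg),
    "Heun": lambda cfg: HeunDiscreteScheduler.from_config(cfg),
    "DDIM": lambda cfg: DDIMScheduler.from_config(cfg),
}

def set_sampler(pipe, name):
    # Swap the pipeline's scheduler in place; unknown names keep the current one.
    factory = SAMPLERS.get(name)
    if factory is not None:
        pipe.scheduler = factory(pipe.scheduler.config)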