tungedng2710 committed on
Commit
486a808
1 Parent(s): 137b89f

Upload folder using huggingface_hub

README.md CHANGED
@@ -1,12 +1,6 @@
  ---
- title: TonAI Assistant
- emoji: 📚
- colorFrom: indigo
- colorTo: pink
- sdk: gradio
- sdk_version: 4.36.0
+ title: TonAI-Assistant
  app_file: app.py
- pinned: false
+ sdk: gradio
+ sdk_version: 4.31.2
  ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
__pycache__/style.cpython-310.pyc ADDED
Binary file (813 Bytes).
 
__pycache__/utils.cpython-310.pyc ADDED
Binary file (2.69 kB).
 
app.py ADDED
@@ -0,0 +1,80 @@
+ import random
+ import torch
+ import gc
+ import gradio as gr
+ from PIL import Image
+ from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline,\
+     StableDiffusionXLPipeline, StableDiffusionUpscalePipeline,\
+     DiffusionPipeline
+ from utils import *
+ from style import custom_css, header_html
+
+ def gen_image(prompt, negative_prompt, width, height, num_steps, mode, seed, guidance_scale, device):
+     """
+     Run diffusion model to generate image
+     """
+     device = f"cuda:{device.split('GPU')[1][1]}"
+     guidance_scale = float(guidance_scale)
+     generator = torch.Generator(device).manual_seed(int(seed))
+     model_path = DIFFUSION_CHECKPOINTS[mode]["path"]
+     Text2Image_class = globals()[DIFFUSION_CHECKPOINTS[mode]["pipeline"]]
+     if DIFFUSION_CHECKPOINTS[mode]["type"] == "pretrained":
+         pipeline = Text2Image_class.from_pretrained(model_path)
+     else:
+         pipeline = Text2Image_class.from_single_file(model_path)
+     pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config)
+     try:
+         pipeline = pipeline.to(device)
+         image = pipeline(prompt=prompt,
+                          negative_prompt=negative_prompt,
+                          width=nearest_divisible_by_8(int(width)),
+                          height=nearest_divisible_by_8(int(height)),
+                          num_inference_steps=int(num_steps),
+                          generator=generator,
+                          guidance_scale=guidance_scale).images[0]
+     except Exception as e:
+         image = Image.open("stuffs/serverdown.jpg")
+         print(e)
+     del pipeline
+     torch.cuda.empty_cache()
+     gc.collect()
+     return image
+
+ with gr.Blocks(title="TonAI Creative", theme=APP_THEME) as interface1:
+     gr.HTML(header_html)
+     with gr.Row():
+         with gr.Column(scale=3):
+             prompt = gr.Textbox(label="Prompt", placeholder="Tell me what you want to generate")
+             negative_prompt = gr.Textbox(label="Negative Prompt", placeholder="Instruct the AI model that it should not include")
+             with gr.Row():
+                 width = gr.Textbox(label="Image Width", value=768)
+                 height = gr.Textbox(label="Image Height", value=768)
+             with gr.Row():
+                 seed = gr.Textbox(label="RNG Seed", value=0, scale=1)
+                 guidance_scale = gr.Textbox(label="CFG Scale", value=7, scale=1)
+             with gr.Row():
+                 num_steps = gr.components.Slider(
+                     minimum=5, maximum=60, value=20, step=1,
+                     label="Inference Steps"
+                 )
+             mode = gr.Dropdown(choices=DIFFUSION_CHECKPOINTS.keys(), label="Mode",
+                                value=list(DIFFUSION_CHECKPOINTS.keys())[1])
+             device_choices = display_gpu_info()
+             device = gr.Dropdown(choices=device_choices, label="Device", value=device_choices[0])
+             generate_btn = gr.Button("Generate")
+         with gr.Column(scale=2):
+             generate_btn.click(
+                 fn=gen_image,
+                 inputs=[prompt, negative_prompt, width, height, num_steps, mode, seed, guidance_scale, device],
+                 outputs=gr.Image(label="Generated Image", format="png"),
+                 concurrency_limit=10
+             )
+     interface1.load(lambda: gr.update(value=random.randint(0, 999999)), None, seed)
+     interface1.load(lambda: gr.update(choices=display_gpu_info(), value=display_gpu_info()[0]), None, device)
+
+ # interface = gr.TabbedInterface([interface1, iface2], ["Text-to-text", "image-to-text"])
+ allowed_paths = ["stuffs/tonai_research_logo.png"]
+ interface1.queue(default_concurrency_limit=10)
+ interface1.launch(share=True,
+                   allowed_paths=allowed_paths,
+                   max_threads=10)
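For reference, below is a minimal sketch of the core diffusers call that `gen_image` wraps, using the `"General (SD 2.1)"` entry from `DIFFUSION_CHECKPOINTS` in utils.py (a pretrained `StableDiffusionPipeline` with a `DPMSolverMultistepScheduler` and a seeded generator). The prompt, seed, and output path are illustrative and not part of the commit.

```python
# Sketch of the pipeline set-up gen_image performs for a "pretrained"-type checkpoint.
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline

device = "cuda:0"  # gen_image derives this from the "GPU <id>" dropdown string
pipeline = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1").to(device)
pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config)
generator = torch.Generator(device).manual_seed(42)  # example seed

image = pipeline(
    prompt="a watercolor painting of Ha Long Bay at sunrise",  # example prompt
    negative_prompt="lowres, blurry, watermark",
    width=768,
    height=768,
    num_inference_steps=20,
    generator=generator,
    guidance_scale=7.0,
).images[0]
image.save("sample.png")  # example output path
```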
demo_app.py ADDED
@@ -0,0 +1,96 @@
+ import random
+ import torch
+ import gc
+ import gradio as gr
+ from PIL import Image
+ from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline,\
+     StableDiffusionXLPipeline, StableDiffusionUpscalePipeline,\
+     DiffusionPipeline
+ from utils import *
+ from style import custom_css, beta_header_html
+
+ def gen_image(prompt, negative_prompt, width, height, num_steps,
+               mode, seed, guidance_scale, device):
+     """
+     Run diffusion model to generate image
+     """
+     use_adapter = True
+     device = f"cuda:{device.split('GPU')[1][1]}"
+     guidance_scale = float(guidance_scale)
+     generator = torch.Generator(device).manual_seed(int(seed))
+     model_path = DIFFUSION_CHECKPOINTS[mode]["path"]
+     Text2Image_class = globals()[DIFFUSION_CHECKPOINTS[mode]["pipeline"]]
+     if DIFFUSION_CHECKPOINTS[mode]["type"] == "pretrained":
+         pipeline = Text2Image_class.from_pretrained(model_path)
+     else:
+         pipeline = Text2Image_class.from_single_file(model_path)
+
+     if use_adapter:
+         if "SDXL 1.0" in mode:
+             print("Load LoRA model")
+             pipeline.load_lora_weights("../checkpoints", weight_name="mod2.safetensors")
+
+     pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config)
+     try:
+         pipeline = pipeline.to(device)
+         image = pipeline(prompt=prompt,
+                          negative_prompt=negative_prompt,
+                          width=nearest_divisible_by_8(int(width)),
+                          height=nearest_divisible_by_8(int(height)),
+                          num_inference_steps=int(num_steps),
+                          generator=generator,
+                          guidance_scale=guidance_scale).images[0]
+         del pipeline
+         torch.cuda.empty_cache()
+         gc.collect()
+     except Exception as e:
+         image = Image.open("stuffs/serverdown.jpg")
+         print(e)
+         del pipeline
+         torch.cuda.empty_cache()
+         gc.collect()
+         return image
+     return image
+
+ with gr.Blocks(title="(Beta) TonAI Creative", theme=APP_THEME) as interface1:
+     gr.HTML(beta_header_html)
+     with gr.Row():
+         with gr.Column(scale=3):
+             prompt = gr.Textbox(label="Prompt",
+                                 placeholder="Tell me what you want to generate",
+                                 container=True)
+             negative_prompt = gr.Textbox(label="Negative Prompt",
+                                          placeholder="Instruct the AI model that it should not include",
+                                          container=True)
+             with gr.Row():
+                 width = gr.Textbox(label="Image Width", value=768)
+                 height = gr.Textbox(label="Image Height", value=768)
+             with gr.Row():
+                 seed = gr.Textbox(label="RNG Seed", value=0, scale=1)
+                 guidance_scale = gr.Textbox(label="CFG Scale", value=7, scale=1)
+             with gr.Row():
+                 num_steps = gr.components.Slider(
+                     minimum=5, maximum=60, value=20, step=1,
+                     label="Inference Steps"
+                 )
+             mode = gr.Dropdown(choices=DIFFUSION_CHECKPOINTS.keys(), label="Mode",
+                                value=list(DIFFUSION_CHECKPOINTS.keys())[1])
+             device_choices = display_gpu_info()
+             device = gr.Dropdown(choices=device_choices, label="Device", value=device_choices[0])
+             generate_btn = gr.Button("Generate")
+         with gr.Column(scale=2):
+             generate_btn.click(
+                 fn=gen_image,
+                 inputs=[prompt, negative_prompt, width, height, num_steps, mode, seed, guidance_scale, device],
+                 outputs=gr.Image(label="Generated Image", format="png"),
+                 concurrency_limit=5
+             )
+     interface1.load(lambda: gr.update(value=random.randint(0, 99999)), None, seed)
+     interface1.load(lambda: gr.update(choices=display_gpu_info(), value=display_gpu_info()[0]), None, device)
+
+ # interface = gr.TabbedInterface([interface1, iface2], ["Text-to-text", "image-to-text"])
+ allowed_paths = ["stuffs/tonai_research_logo.png"]
+ interface1.queue(default_concurrency_limit=5)
+ interface1.launch(share=False,
+                   allowed_paths=allowed_paths,
+                   max_threads=5)
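The only functional difference from app.py is the `use_adapter` branch, which attaches a local LoRA file to SDXL checkpoints before inference. A minimal sketch of that step in isolation follows, assuming the `../checkpoints/mod2.safetensors` file referenced in the diff exists on the host; the base checkpoint id is the pretrained SDXL entry from utils.py.

```python
# Sketch: attaching a LoRA adapter to an SDXL pipeline, mirroring the use_adapter branch above.
from diffusers import StableDiffusionXLPipeline

pipeline = StableDiffusionXLPipeline.from_pretrained("RunDiffusion/Juggernaut-X-Hyper")
# Directory and weight name taken from the diff; assumed to exist locally.
pipeline.load_lora_weights("../checkpoints", weight_name="mod2.safetensors")
# Optionally merge the adapter into the base weights before moving to GPU:
# pipeline.fuse_lora()
```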
info(beta).html ADDED
@@ -0,0 +1,5 @@
+ <p align="center">
+     <img src='file/stuffs/tonai_research_logo.png' alt='Logo' style='width: 100px; text-align: center'>
+ </p>
+ <h1 style='text-align: center;' markdown='1'>TonAI Creative (Beta Version)</h1>
+ <h3 style='text-align: center;' markdown='1'>Create images and art using AI</h3>
info.html ADDED
@@ -0,0 +1,5 @@
+ <p align="center">
+     <img src='file/stuffs/tonai_research_logo.png' alt='Logo' style='width: 100px; text-align: center'>
+ </p>
+ <h1 style='text-align: center;' markdown='1'>TonAI Creative</h1>
+ <h3 style='text-align: center;' markdown='1'>Create images and art using AI</h3>
requirements.txt ADDED
@@ -0,0 +1,351 @@
+ absl-py==2.0.0
+ accelerate==0.27.2
+ aiofiles==23.2.1
+ aiohttp==3.8.6
+ aiosignal==1.3.1
+ altair==5.2.0
+ annotated-types==0.6.0
+ anyio==4.3.0
+ appdirs==1.4.4
+ asgiref==3.8.1
+ asttokens==2.4.0
+ async-property==0.2.2
+ async-timeout==4.0.3
+ asyncstdlib==3.12.2
+ attrs==23.2.0
+ auto_gptq==0.7.0
+ backcall==0.2.0
+ beartype==0.18.5
+ beautifulsoup4==4.12.2
+ bitsandbytes==0.41.1
+ bleach==6.1.0
+ blinker==1.7.0
+ blis==0.7.11
+ blobfile==2.1.1
+ Brotli==1.1.0
+ brotlipy==0.7.0
+ browser-cookie3==0.19.1
+ bs4==0.0.1
+ cachetools==5.3.2
+ catalogue==2.0.10
+ certifi==2024.2.2
+ cffi @ file:///tmp/abs_98z5h56wf8/croots/recipe/cffi_1659598650955/work
+ chardet==3.0.4
+ charset-normalizer @ file:///tmp/build/80754af9/charset-normalizer_1630003229654/work
+ click==8.1.7
+ cloudpathlib==0.16.0
+ cloudpickle==3.0.0
+ cmake==3.25.2
+ colorama==0.4.6
+ coloredlogs==15.0.1
+ comm==0.1.4
+ confection==0.1.4
+ contourpy==1.2.0
+ cryptography @ file:///croot/cryptography_1694444244250/work
+ cssselect==1.2.0
+ curl_cffi==0.6.3b1
+ cycler==0.12.1
+ cymem==2.0.8
+ DALL-E==0.1
+ dataclasses-json==0.6.4
+ datasets==2.14.6
+ debugpy==1.8.0
+ decorator==5.1.1
+ denoising-diffusion-pytorch==1.9.2
+ diffuser==0.0.1
+ -e git+https://github.com/huggingface/diffusers@965e52ce611108559a0ebab75c8b421d1229c5ab#egg=diffusers
+ dill==0.3.7
+ diskcache==5.6.3
+ dlib==19.24.2
+ dnspython==2.6.1
+ docker-pycreds==0.4.0
+ duckduckgo_search==5.3.0
+ easyocr==1.7.1
+ einops==0.7.0
+ ema-pytorch==0.2.3
+ email_validator==2.1.1
+ exceptiongroup==1.1.3
+ executing==2.0.0
+ fastapi==0.111.0
+ fastapi-cli==0.0.4
+ feedfinder2==0.0.4
+ feedparser==6.0.11
+ ffmpy==0.3.2
+ filelock @ file:///croot/filelock_1672387128942/work
+ filterpy==1.4.5
+ fire==0.5.0
+ Flask==3.0.3
+ flatbuffers==23.5.26
+ fonttools==4.44.0
+ frozenlist==1.4.0
+ fsspec==2023.9.2
+ ftfy==6.1.1
+ g4f==0.3.1.8
+ gdown==4.7.1
+ gekko==1.0.6
+ gitdb==4.0.11
+ GitPython==3.1.40
+ gmpy2 @ file:///tmp/build/80754af9/gmpy2_1645455533097/work
+ google==3.0.0
+ google-auth==2.23.4
+ google-auth-oauthlib==1.1.0
+ googletrans==3.0.0
+ gradio==4.31.2
+ gradio_client==0.16.3
+ greenlet==3.0.3
+ grpcio==1.59.2
+ h11==0.14.0
+ h2==3.2.0
+ hpack==3.0.0
+ hstspreload==2023.1.1
+ httpcore==0.18.0
+ httptools==0.6.1
+ httpx==0.25.0
+ huggingface-hub==0.21.3
+ humanfriendly==10.0
+ hyperframe==5.2.0
+ idna==2.10
+ imageio==2.33.1
+ imagen-pytorch==1.26.3
+ importlib-metadata==6.8.0
+ importlib-resources==6.1.1
+ iniconfig==2.0.0
+ interegular==0.3.3
+ iopath==0.1.10
+ ipykernel==6.25.2
+ ipython==8.16.1
+ itsdangerous==2.1.2
+ jedi==0.19.1
+ jeepney==0.8.0
+ jieba3k==0.35.1
+ Jinja2 @ file:///croot/jinja2_1666908132255/work
+ joblib==1.3.2
+ Js2Py==0.74
+ jsonpatch==1.33
+ jsonpointer==2.4
+ jsonschema==4.21.1
+ jsonschema-specifications==2023.12.1
+ jupyter_client==8.4.0
+ jupyter_core==5.4.0
+ kaggle==1.6.12
+ kiwisolver==1.4.5
+ kornia==0.7.2
+ kornia_rs==0.1.3
+ langchain==0.1.9
+ langchain-community==0.0.24
+ langchain-core==0.1.27
+ langcodes==3.3.0
+ langsmith==0.1.10
+ lark==1.1.9
+ lazy_loader==0.3
+ llvmlite==0.42.0
+ loguru==0.7.2
+ lora-diffusion @ git+https://github.com/cloneofsimo/lora.git@bdd51b04c49fa90a88919a19850ec3b4cf3c5ecd
+ loralib==0.1.2
+ lxml==4.9.3
+ lz4==4.3.3
+ Markdown==3.5.1
+ markdown-it-py==3.0.0
+ MarkupSafe @ file:///opt/conda/conda-bld/markupsafe_1654597864307/work
+ marshmallow==3.21.0
+ matplotlib==3.8.1
+ matplotlib-inline==0.1.6
+ mdurl==0.1.2
+ mediapipe==0.10.8
+ mkl-fft==1.3.1
+ mkl-random @ file:///home/builder/ci_310/mkl_random_1641843545607/work
+ mkl-service==2.4.0
+ mmh3==4.1.0
+ mpmath @ file:///croot/mpmath_1690848262763/work
+ msgpack==1.0.8
+ multidict==6.0.4
+ multiprocess==0.70.15
+ murmurhash==1.0.10
+ mypy==1.7.0
+ mypy-extensions==1.0.0
+ nest-asyncio==1.5.8
+ networkx @ file:///croot/networkx_1690561992265/work
+ newspaper3k==0.2.8
+ ninja==1.11.1.1
+ nltk==3.8.1
+ numba==0.59.1
+ numpy @ file:///croot/numpy_and_numpy_base_1682520569166/work
+ nvidia-cublas-cu12==12.1.3.1
+ nvidia-cuda-cupti-cu12==12.1.105
+ nvidia-cuda-nvrtc-cu12==12.1.105
+ nvidia-cuda-runtime-cu12==12.1.105
+ nvidia-cudnn-cu12==8.9.2.26
+ nvidia-cufft-cu12==11.0.2.54
+ nvidia-curand-cu12==10.3.2.106
+ nvidia-cusolver-cu12==11.4.5.107
+ nvidia-cusparse-cu12==12.1.0.106
+ nvidia-nccl-cu12==2.18.1
+ nvidia-nvjitlink-cu12==12.4.127
+ nvidia-nvtx-cu12==12.1.105
+ oauthlib==3.2.2
+ onnxruntime-gpu==1.16.0
+ open-clip-torch==2.24.0
+ opencv-contrib-python==4.8.1.78
+ opencv-python==4.8.1.78
+ opencv-python-headless==4.9.0.80
+ optimum==1.17.1
+ orjson==3.10.0
+ outcome==1.3.0.post0
+ outlines==0.0.34
+ packaging==23.2
+ pandas==2.1.1
+ parso==0.8.3
+ pathlib_abc==0.1.1
+ pathos==0.3.1
+ pathy==0.11.0
+ peft==0.6.2
+ pexpect==4.8.0
+ pickleshare==0.7.5
+ pillow==10.3.0
+ platformdirs==3.11.0
+ pluggy==1.3.0
+ portalocker==2.8.2
+ pox==0.3.3
+ ppft==1.7.6.7
+ preshed==3.0.9
+ progressbar==2.5
+ prometheus_client==0.20.0
+ prompt-toolkit==3.0.39
+ protobuf==3.20.3
+ psutil==5.9.6
+ ptyprocess==0.7.0
+ pure-eval==0.2.2
+ py-arkose-generator==0.0.0.2
+ py-cpuinfo==9.0.0
+ pyarrow==14.0.1
+ pyasn1==0.5.0
+ pyasn1-modules==0.3.0
+ pyclipper==1.3.0.post5
+ pycparser @ file:///tmp/build/80754af9/pycparser_1636541352034/work
+ pycryptodome==3.20.0
+ pycryptodomex==3.19.0
+ pydantic==2.6.1
+ pydantic_core==2.16.2
+ pydub==0.25.1
+ PyExecJS==1.5.1
+ Pygments==2.16.1
+ pyjsparser==2.7.1
+ PyMuPDF==1.24.4
+ PyMuPDFb==1.24.3
+ pynvml==11.5.0
+ pyOpenSSL @ file:///croot/pyopenssl_1690223430423/work
+ pyparsing==3.1.1
+ PySocks @ file:///home/builder/ci_310/pysocks_1640793678128/work
+ pyTelegramBotAPI==4.14.0
+ pytest==7.4.3
+ python-bidi==0.4.2
+ python-crfsuite==0.9.10
+ python-dateutil==2.8.2
+ python-dotenv==1.0.1
+ python-multipart==0.0.9
+ python-slugify==8.0.4
+ pytorch-fid==0.3.0
+ pytorch-warmup==0.1.1
+ pytz==2023.3.post1
+ pyvi==0.1.1
+ PyYAML @ file:///croot/pyyaml_1670514731622/work
+ pyzmq==25.1.1
+ ray==2.11.0
+ referencing==0.33.0
+ regex==2023.10.3
+ requests @ file:///croot/requests_1690400202158/work
+ requests-file==2.0.0
+ requests-oauthlib==1.3.1
+ rfc3986==1.5.0
+ rich==13.7.0
+ rouge==1.0.1
+ rpds-py==0.18.0
+ rsa==4.9
+ ruff==0.2.2
+ safetensors==0.4.2
+ scikit-image==0.22.0
+ scikit-learn==1.4.2
+ scipy==1.11.3
+ seaborn==0.13.2
+ selenium==4.19.0
+ semantic-version==2.10.0
+ sentencepiece==0.1.99
+ sentry-sdk==1.37.0
+ setproctitle==1.3.3
+ sgmllib3k==1.0.0
+ shapely==2.0.4
+ shellingham==1.5.4
+ six @ file:///tmp/build/80754af9/six_1644875935023/work
+ sklearn-crfsuite==0.3.6
+ smart-open==6.4.0
+ smmap==5.0.1
+ sniffio==1.3.0
+ sortedcontainers==2.4.0
+ sounddevice==0.4.6
+ soupsieve==2.5
+ spacy==3.6.1
+ spacy-legacy==3.0.12
+ spacy-loggers==1.0.5
+ SQLAlchemy==2.0.27
+ srsly==2.4.8
+ stack-data==0.6.3
+ starlette==0.37.2
+ sympy @ file:///croot/sympy_1668202399572/work
+ tabulate==0.9.0
+ tenacity==8.2.3
+ tensorboard==2.15.1
+ tensorboard-data-server==0.7.2
+ termcolor==2.3.0
+ text-unidecode==1.3
+ thinc==8.1.12
+ thop==0.1.1.post2209072238
+ threadpoolctl==3.4.0
+ tifffile==2024.1.30
+ tiktoken==0.6.0
+ timm==0.9.16
+ tinysegmenter==0.3
+ tldextract==5.1.1
+ tokenizers==0.19.1
+ tomli==2.0.1
+ tomlkit==0.12.0
+ toolz==0.12.1
+ torch==2.1.2
+ torchaudio==2.1.0
+ torchmultimodal-nightly==2023.11.13
+ torchvision==0.16.0
+ tornado==6.3.3
+ tqdm==4.66.1
+ traitlets==5.11.2
+ transformers==4.40.0
+ translators==5.8.8
+ trio==0.25.0
+ trio-websocket==0.11.1
+ triton==2.1.0
+ typer==0.12.3
+ typing-inspect==0.9.0
+ typing_extensions==4.9.0
+ tzdata==2023.3
+ tzlocal==5.2
+ ujson==5.10.0
+ ultralytics==8.2.2
+ undetected-chromedriver==3.5.5
+ Unidecode==1.3.8
+ urllib3==2.2.1
+ uvicorn==0.29.0
+ uvloop==0.19.0
+ vi-core-news-lg @ https://gitlab.com/trungtv/vi_spacy/-/raw/master/packages/vi_core_news_lg-3.6.0/dist/vi_core_news_lg-3.6.0.tar.gz#sha256=4f3a3b88c03c1d035a924f7103a5c81ac2922571f2db5022898978b5b3eb5976
+ vllm==0.4.0.post1
+ waitress==3.0.0
+ wandb==0.16.6
+ wasabi==1.1.2
+ watchfiles==0.21.0
+ wcwidth==0.2.8
+ weasel==0.3.4
+ webencodings==0.5.1
+ websockets==11.0.3
+ Werkzeug==3.0.1
+ wsproto==1.2.0
+ xformers==0.0.23.post1
+ xxhash==3.4.1
+ yarl==1.9.2
+ zipp==3.17.0
stuffs/negative_prompt.txt ADDED
@@ -0,0 +1 @@
+ lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature
stuffs/serverdown.jpg ADDED
stuffs/tonai_research_logo.png ADDED
style.py ADDED
@@ -0,0 +1,32 @@
+ from typing import Iterable
+ from utils import *
+ from gradio.themes.base import Base
+ from gradio.themes.utils import colors, fonts, sizes
+ import time
+
+ custom_css = """
+ body {
+     background-color: #f0f0f0;
+     color: #333;
+     font-family: Arial, sans-serif;
+ }
+
+ .gr-button {
+     background-color: #4CAF50;
+     color: white;
+     border: none;
+     border-radius: 4px;
+     padding: 10px 20px;
+     cursor: pointer;
+ }
+
+ .gr-button:hover {
+     background-color: #45a049;
+ }
+
+ .gr-textbox, .gr-image {
+     margin-top: 10px;
+ }
+ """
+ header_html = read_md_file_to_string("info.html")
+ beta_header_html = read_md_file_to_string("info(beta).html")
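Note that `custom_css` is imported by both app.py and demo_app.py but is not attached to the UI in this commit; both apps rely on the Hub theme only. If the stylesheet is meant to apply, one way (a sketch, not part of the committed code) is to pass it through the `css` argument of `gr.Blocks`:

```python
# Sketch: wiring the stylesheet from style.py into the Blocks constructor.
import gradio as gr
from style import custom_css
from utils import APP_THEME

with gr.Blocks(title="TonAI Creative", theme=APP_THEME, css=custom_css) as interface1:
    ...  # components as defined in app.py
```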
utils.py ADDED
@@ -0,0 +1,94 @@
+ import gradio as gr
+ import GPUtil
+
+ DIFFUSION_CHECKPOINTS = {
+     "General (SD 2.1)": {
+         "path": "stabilityai/stable-diffusion-2-1",
+         "type": "pretrained",
+         "pipeline": "StableDiffusionPipeline"
+     },
+     "Realistic (SD 1.5)": {
+         "path": "/root/tungn197/genAI/checkpoints/realisticVisionV60B1_v51HyperVAE.safetensors",
+         "type": "file",
+         "pipeline": "StableDiffusionPipeline"
+     },
+     "Anime (SD 1.5)": {
+         "path": "/root/tungn197/genAI/checkpoints/darkSushiMixMix_225D.safetensors",
+         "type": "file",
+         "pipeline": "StableDiffusionPipeline"
+     },
+     "Comic Book (SD 1.5)": {
+         "path": "/media/drive-2t/tungn197/checkpoints/realisticComicBook_v10.safetensors",
+         "type": "file",
+         "pipeline": "StableDiffusionPipeline"
+     },
+     "MajicMixRealisticV7 (SD 1.5 Focus on Asian face)": {
+         "path": "/root/tungn197/genAI/checkpoints/majicmixRealistic_v7.safetensors",
+         "type": "file",
+         "pipeline": "StableDiffusionPipeline"
+     },
+     "AniMeshFullV22 (SD 1.5)": {
+         "path": "/media/drive-2t/tungn197/checkpoints/animesh_FullV22.safetensors",
+         "type": "file",
+         "pipeline": "StableDiffusionPipeline"
+     },
+     "epiCRealism XL (SDXL 1.0)": {
+         "path": "/root/tungn197/genAI/checkpoints/epicrealismXL_v7FinalDestination.safetensors",
+         "type": "file",
+         "pipeline": "StableDiffusionXLPipeline"
+     },
+     "Juggernaut X Hyper (SDXL 1.0)": {
+         "path": "RunDiffusion/Juggernaut-X-Hyper",
+         "type": "pretrained",
+         "pipeline": "StableDiffusionXLPipeline"
+     }
+ }
+ AUTH_USER = [("admin", "admin")]
+ DEVICE = "cuda:0"
+ APP_THEME = gr.Theme.from_hub("ParityError/Interstellar")
+
+ def read_md_file_to_string(file_path):
+     try:
+         with open(file_path, 'r', encoding='utf-8') as file:
+             file_content = file.read()
+             return file_content
+     except FileNotFoundError:
+         print(f"The file at {file_path} was not found.")
+     except Exception as e:
+         print(f"An error occurred: {e}")
+
+ def nearest_divisible_by_8(n):
+     lower_multiple = (n // 8) * 8
+     upper_multiple = lower_multiple + 8
+     if (n - lower_multiple) < (upper_multiple - n):
+         return int(lower_multiple)
+     else:
+         return int(upper_multiple)
+
+ def get_gpu_info():
+     gpus = GPUtil.getGPUs()
+     gpu_info = []
+     for gpu in gpus:
+         info = {
+             'id': gpu.id,
+             'name': gpu.name,
+             'driver_version': gpu.driver,
+             'total_memory': gpu.memoryTotal,  # In MB
+             'available_memory': gpu.memoryFree,  # In MB
+             'used_memory': gpu.memoryUsed,  # In MB
+             'temperature': gpu.temperature  # In Celsius
+         }
+         gpu_info.append(info)
+     return gpu_info
+
+ def display_gpu_info():
+     info_list = []
+     gpus = get_gpu_info()
+     for info in gpus:
+         info_list.append(f"GPU {info['id']} ({info['name']}, Total: {info['total_memory']} MB, Available: {info['available_memory']} MB)")
+     return info_list
+
+ GPUS = get_gpu_info()
+ DISPLAYED_GPUS_INFO = []
+ for info in GPUS:
+     DISPLAYED_GPUS_INFO.append(f"GPU {info['id']}")
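For context on the helpers above: `nearest_divisible_by_8` snaps the requested resolution to the multiple of 8 that Stable Diffusion latents require, and `display_gpu_info` builds the strings shown in the device dropdown, which `gen_image` later parses for the GPU id. A small usage sketch follows; the GPU string in the comment is an example and depends on the host.

```python
# Sketch: exercising the helpers defined in utils.py.
from utils import nearest_divisible_by_8, display_gpu_info

print(nearest_divisible_by_8(770))  # -> 768 (2 away from 768, 6 away from 776)
print(nearest_divisible_by_8(766))  # -> 768 (rounds up)

for choice in display_gpu_info():
    # e.g. "GPU 0 (NVIDIA A100-SXM4-40GB, Total: 40960.0 MB, Available: 39000.0 MB)"
    print(choice)
```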