JingyeChen committed on
Commit
0f61c4d
1 Parent(s): bf48e0b
Files changed (2)
  1. app.py +424 -4
  2. requirements.txt +7 -0
app.py CHANGED
@@ -1,7 +1,427 @@
  import gradio as gr
- def greet(name):
-     return "Hello " + name + "!!"
- iface = gr.Interface(fn=greet, inputs="text", outputs="text")
- iface.launch()
+ import os
+ import re
+ import zipfile
+ import torch
  import gradio as gr
+ import time
+ from transformers import CLIPTextModel, CLIPTokenizer
+ from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionPipeline, UNet2DConditionModel
+ from tqdm import tqdm
+ from PIL import Image
+ from PIL import Image, ImageDraw, ImageFont
+
+ import string
+ alphabet = string.digits + string.ascii_lowercase + string.ascii_uppercase + string.punctuation + ' '  # len(alphabet) = 95
+ '''alphabet
+ 0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~
+ '''
+
+ if not os.path.exists('arial.ttf'):
+     os.system('wget https://huggingface.co/datasets/JingyeChen22/TextDiffuser/resolve/main/arial.ttf')
+
+ if not os.path.exists('architecture.jpg'):
+     os.system('wget https://huggingface.co/JingyeChen22/textdiffuser2-full-ft/resolve/main/architecture.jpg')
+
+ if not os.path.exists('gray256.jpg'):
+     os.system('wget https://huggingface.co/JingyeChen22/textdiffuser2-full-ft/resolve/main/gray256.jpg')
+
+
+
+ # #### import m1
+ # from fastchat.model import load_model, get_conversation_template
+ # m1_model_path = '/home/jingyechen/FastChat/1204_final'
+ # m1_model, m1_tokenizer = load_model(
+ #     m1_model_path,
+ #     'cuda',
+ #     1,
+ #     None,
+ #     False,
+ #     False,
+ #     revision="main",
+ #     debug=False,
+ # )
+
+ #### import diffusion models
+ text_encoder = CLIPTextModel.from_pretrained(
+     'JingyeChen22/textdiffuser2-full-ft', subfolder="text_encoder", ignore_mismatched_sizes=True
+ ).cuda()
+ tokenizer = CLIPTokenizer.from_pretrained(
+     'runwayml/stable-diffusion-v1-5', subfolder="tokenizer"
+ )
+
+ #### additional tokens are introduced, including coordinate tokens and character tokens
+ print('***************')
+ print(len(tokenizer))
+ for i in range(520):
+     tokenizer.add_tokens(['l' + str(i)])  # left
+     tokenizer.add_tokens(['t' + str(i)])  # top
+     tokenizer.add_tokens(['r' + str(i)])  # right
+     tokenizer.add_tokens(['b' + str(i)])  # bottom
+ for c in alphabet:
+     tokenizer.add_tokens([f'[{c}]'])
+ print(len(tokenizer))
+ print('***************')
+
+ vae = AutoencoderKL.from_pretrained('runwayml/stable-diffusion-v1-5', subfolder="vae").cuda()
+ unet = UNet2DConditionModel.from_pretrained(
+     'JingyeChen22/textdiffuser2-full-ft', subfolder="unet"
+ ).cuda()
+ text_encoder.resize_token_embeddings(len(tokenizer))
+
+
+ #### for interactive
+ stack = []
+ state = 0
+ font = ImageFont.truetype("./arial.ttf", 32)
+
+ def skip_fun(i, t):
+     global state
+     state = 0
+
+
+ def exe_undo(i, t):
+     global stack
+     global state
+     state = 0
+     stack = []
+     image = Image.open('./gray256.jpg')
+     print('stack', stack)
+     return image
+
+
+ def exe_redo(i, t):
+     global state
+     state = 0
+
+     if len(stack) > 0:
+         stack.pop()
+     image = Image.open('./gray256.jpg')
+     draw = ImageDraw.Draw(image)
+
+     for items in stack:
+         # print('now', items)
+         text_position, t = items
+         if len(text_position) == 2:
+             x, y = text_position
+             text_color = (255, 0, 0)
+             draw.text((x+2, y), t, font=font, fill=text_color)
+             r = 4
+             leftUpPoint = (x-r, y-r)
+             rightDownPoint = (x+r, y+r)
+             draw.ellipse((leftUpPoint, rightDownPoint), fill='red')
+         elif len(text_position) == 4:
+             x0, y0, x1, y1 = text_position
+             text_color = (255, 0, 0)
+             draw.text((x0+2, y0), t, font=font, fill=text_color)
+             r = 4
+             leftUpPoint = (x0-r, y0-r)
+             rightDownPoint = (x0+r, y0+r)
+             draw.ellipse((leftUpPoint, rightDownPoint), fill='red')
+             draw.rectangle((x0, y0, x1, y1), outline=(255, 0, 0))
+
+     print('stack', stack)
+     return image
+
+ def get_pixels(i, t, evt: gr.SelectData):
+     global state
+
+     text_position = evt.index
+
+     if state == 0:
+         stack.append(
+             (text_position, t)
+         )
+         print(text_position, stack)
+         state = 1
+     else:
+
+         (_, t) = stack.pop()
+         x, y = _
+         stack.append(
+             ((x, y, text_position[0], text_position[1]), t)
+         )
+         state = 0
+
+
+     image = Image.open('./gray256.jpg')
+     draw = ImageDraw.Draw(image)
+
+     for items in stack:
+         # print('now', items)
+         text_position, t = items
+         if len(text_position) == 2:
+             x, y = text_position
+             text_color = (255, 0, 0)
+             draw.text((x+2, y), t, font=font, fill=text_color)
+             r = 4
+             leftUpPoint = (x-r, y-r)
+             rightDownPoint = (x+r, y+r)
+             draw.ellipse((leftUpPoint, rightDownPoint), fill='red')
+         elif len(text_position) == 4:
+             x0, y0, x1, y1 = text_position
+             text_color = (255, 0, 0)
+             draw.text((x0+2, y0), t, font=font, fill=text_color)
+             r = 4
+             leftUpPoint = (x0-r, y0-r)
+             rightDownPoint = (x0+r, y0+r)
+             draw.ellipse((leftUpPoint, rightDownPoint), fill='red')
+             draw.rectangle((x0, y0, x1, y1), outline=(255, 0, 0))
+
+     print('stack', stack)
+
+     return image
+
+
+
+
+ def text_to_image(prompt, keywords, slider_step, slider_guidance, slider_batch, slider_temperature):
+
+     global stack
+     global state
+
+     with torch.no_grad():
+         time1 = time.time()
+         user_prompt = prompt
+
+
+         if len(stack) == 0:
+
+             if len(keywords.strip()) == 0:
+                 template = f'Given a prompt that will be used to generate an image, plan the layout of visual text for the image. The size of the image is 128x128. Therefore, all properties of the positions should not exceed 128, including the coordinates of top, left, right, and bottom. All keywords are included in the caption. You dont need to specify the details of font styles. At each line, the format should be keyword left, top, right, bottom. So let us begin. Prompt: {user_prompt}'
+             else:
+                 keywords = keywords.split('/')
+                 keywords = [i.strip() for i in keywords]
+                 template = f'Given a prompt that will be used to generate an image, plan the layout of visual text for the image. The size of the image is 128x128. Therefore, all properties of the positions should not exceed 128, including the coordinates of top, left, right, and bottom. In addition, we also provide all keywords at random order for reference. You dont need to specify the details of font styles. At each line, the format should be keyword left, top, right, bottom. So let us begin. Prompt: {prompt}. Keywords: {str(keywords)}'
+
+             # layout planning with the language model (requires the m1 block above to be loaded)
+             msg = template
+             conv = get_conversation_template(m1_model_path)
+             conv.append_message(conv.roles[0], msg)
+             conv.append_message(conv.roles[1], None)
+             prompt = conv.get_prompt()
+             inputs = m1_tokenizer([prompt], return_token_type_ids=False)
+             inputs = {k: torch.tensor(v).to('cuda') for k, v in inputs.items()}
+             output_ids = m1_model.generate(
+                 **inputs,
+                 do_sample=True,
+                 temperature=slider_temperature,
+                 repetition_penalty=1.0,
+                 max_new_tokens=512,
+             )
+
+             if m1_model.config.is_encoder_decoder:
+                 output_ids = output_ids[0]
+             else:
+                 output_ids = output_ids[0][len(inputs["input_ids"][0]):]
+             outputs = m1_tokenizer.decode(
+                 output_ids, skip_special_tokens=True, spaces_between_special_tokens=False
+             )
+             print(f"[{conv.roles[0]}]\n{msg}")
+             print(f"[{conv.roles[1]}]\n{outputs}")
+             ocrs = outputs.split('\n')
+             time2 = time.time()
+             print(time2-time1)
+
+             # user_prompt = prompt
+             current_ocr = ocrs
+
+             ocr_ids = []
+             print('user_prompt', user_prompt)
+             print('current_ocr', current_ocr)
+
+             for ocr in current_ocr:
+                 ocr = ocr.strip()
+
+                 if len(ocr) == 0 or '###' in ocr or '.com' in ocr:
+                     continue
+
+                 items = ocr.split()
+                 pred = ' '.join(items[:-1])
+                 box = items[-1]
+
+                 l, t, r, b = box.split(',')
+                 l, t, r, b = int(l), int(t), int(r), int(b)
+                 ocr_ids.extend(['l'+str(l), 't'+str(t), 'r'+str(r), 'b'+str(b)])
+
+                 char_list = list(pred)
+                 char_list = [f'[{i}]' for i in char_list]
+                 ocr_ids.extend(char_list)
+                 ocr_ids.append(tokenizer.eos_token_id)
+
+             caption_ids = tokenizer(
+                 user_prompt, truncation=True, return_tensors="pt"
+             ).input_ids[0].tolist()
+
+             try:
+                 ocr_ids = tokenizer.encode(ocr_ids)
+                 prompt = caption_ids + ocr_ids
+             except:
+                 prompt = caption_ids
+
+         else:
+             user_prompt += ' <|endoftext|>'
+
+             for items in stack:
+                 position, text = items
+
+                 if len(position) == 2:
+                     x, y = position
+                     x = x // 4
+                     y = y // 4
+                     text_str = ' '.join([f'[{c}]' for c in list(text)])
+                     user_prompt += f'<|startoftext|> l{x} t{y} {text_str} <|endoftext|>'
+                 elif len(position) == 4:
+                     x0, y0, x1, y1 = position
+                     x0 = x0 // 4
+                     y0 = y0 // 4
+                     x1 = x1 // 4
+                     y1 = y1 // 4
+                     text_str = ' '.join([f'[{c}]' for c in list(text)])
+                     user_prompt += f'<|startoftext|> l{x0} t{y0} r{x1} b{y1} {text_str} <|endoftext|>'
+
+             prompt = tokenizer.encode(user_prompt)
+
+         prompt = prompt[:77]
+         while len(prompt) < 77:
+             prompt.append(tokenizer.pad_token_id)
+         prompts_cond = prompt
+         prompts_nocond = [tokenizer.pad_token_id]*77
+
+         prompts_cond = [prompts_cond] * slider_batch
+         prompts_nocond = [prompts_nocond] * slider_batch
+
+         prompts_cond = torch.Tensor(prompts_cond).long().cuda()
+         prompts_nocond = torch.Tensor(prompts_nocond).long().cuda()
+
+         scheduler = DDPMScheduler.from_pretrained('runwayml/stable-diffusion-v1-5', subfolder="scheduler")
+         scheduler.set_timesteps(slider_step)
+         noise = torch.randn((slider_batch, 4, 64, 64)).to("cuda")
+         input = noise
+
+         encoder_hidden_states_cond = text_encoder(prompts_cond)[0]
+         encoder_hidden_states_nocond = text_encoder(prompts_nocond)[0]
+
+
+         for t in tqdm(scheduler.timesteps):
+             with torch.no_grad():  # classifier free guidance
+                 noise_pred_cond = unet(sample=input, timestep=t, encoder_hidden_states=encoder_hidden_states_cond[:slider_batch]).sample  # b, 4, 64, 64
+                 noise_pred_uncond = unet(sample=input, timestep=t, encoder_hidden_states=encoder_hidden_states_nocond[:slider_batch]).sample  # b, 4, 64, 64
+                 noisy_residual = noise_pred_uncond + slider_guidance * (noise_pred_cond - noise_pred_uncond)  # b, 4, 64, 64
+                 prev_noisy_sample = scheduler.step(noisy_residual, t, input).prev_sample
+                 input = prev_noisy_sample
+
+         # decode
+         input = 1 / vae.config.scaling_factor * input
+         images = vae.decode(input, return_dict=False)[0]
+         width, height = 512, 512
+         results = []
+         new_image = Image.new('RGB', (2*width, 2*height))
+         for index, image in enumerate(images.float()):
+             image = (image / 2 + 0.5).clamp(0, 1).unsqueeze(0)
+             image = image.cpu().permute(0, 2, 3, 1).numpy()[0]
+             image = Image.fromarray((image * 255).round().astype("uint8")).convert('RGB')
+             results.append(image)
+             row = index // 2
+             col = index % 2
+             new_image.paste(image, (col*width, row*height))
+             # new_image.save(f'{args.output_dir}/pred_img_{sample_index}_{args.local_rank}.jpg')
+         results.insert(0, new_image)
+         return new_image
+
+ with gr.Blocks() as demo:
+
+     gr.HTML(
+         """
+         <div style="text-align: center; max-width: 1600px; margin: 20px auto;">
+             <h2 style="font-weight: 900; font-size: 2.7rem; margin: 0rem">
+                 TextDiffuser-2: Unleashing the Power of Language Models for Text Rendering
+             </h2>
+             <h2 style="font-weight: 480; font-size: 1.4rem; margin: 0rem">
+                 <a href="https://jingyechen.github.io/">Jingye Chen</a>, <a href="https://hypjudy.github.io/website/">Yupan Huang</a>, <a href="https://scholar.google.com/citations?user=0LTZGhUAAAAJ&hl=en">Tengchao Lv</a>, <a href="https://www.microsoft.com/en-us/research/people/lecu/">Lei Cui</a>, <a href="https://cqf.io/">Qifeng Chen</a>, <a href="https://thegenerality.com/">Furu Wei</a>
+             </h2>
+             <h2 style="font-weight: 460; font-size: 1.2rem; margin: 0rem">
+                 HKUST, Sun Yat-sen University, Microsoft Research
+             </h2>
+             <h3 style="font-weight: 450; font-size: 1rem; margin: 0rem">
+                 [<a href="https://arxiv.org/abs/2311.16465" style="color:blue;">arXiv</a>]
+                 [<a href="https://github.com/microsoft/unilm/tree/master/textdiffuser-2" style="color:blue;">Code</a>]
+             </h3>
+             <h2 style="text-align: left; font-weight: 450; font-size: 1rem; margin-top: 0.5rem; margin-bottom: 0.5rem">
+                 We propose <b>TextDiffuser-2</b>, aiming at unleashing the power of language models for text rendering. Firstly, we <b>tame a language model into a layout planner</b> that transforms the user prompt into a layout using caption-OCR pairs. The language model demonstrates flexibility and automation by inferring keywords from user prompts or incorporating user-specified keywords to determine their positions. Secondly, we <b>leverage the language model within the diffusion model as the layout encoder</b> to represent the position and content of text at the line level. This approach enables diffusion models to generate text images with broader diversity.
+             </h2>
+             <h2 style="text-align: left; font-weight: 450; font-size: 1rem; margin-top: 0.5rem; margin-bottom: 0.5rem">
+                 👀 <b>Tips for using this demo</b>: <b>(1)</b> Please carefully read the disclaimer below. <b>(2)</b> The specification of keywords is optional. If provided, the language model will do its best to plan layouts using the given keywords. <b>(3)</b> If a template is given, the layout planner (M1) is not used. <b>(4)</b> Three operations, including redo, undo, and skip, are provided. When using skip, only the top-left point of a keyword is recorded, resulting in more diversity but sometimes lower accuracy. <b>(5)</b> The layout planner can produce different layouts; you can control the temperature to adjust its diversity.
+             </h2>
+
+             <style>
+                 .scaled-image {
+                     transform: scale(0.75);
+                 }
+             </style>
+
+             <img src="file/architecture.jpg" alt="textdiffuser-2" class="scaled-image">
+         </div>
+         """)
+
+     with gr.Tab("Text-to-Image"):
+         with gr.Row():
+             with gr.Column(scale=1):
+                 prompt = gr.Textbox(label="Input your prompt here.", placeholder="A beautiful city skyline stamp of Shanghai")
+                 keywords = gr.Textbox(label="(Optional) Input your keywords here. Keywords should be separated by / (e.g., keyword1/keyword2/...)", placeholder="keyword1/keyword2")
+
+                 # add a dialogue box here
+                 with gr.Row():
+                     with gr.Column(scale=1):
+                         i = gr.Image(label="Template", type='filepath', value='gray256.jpg', height=256, width=256)
+                     with gr.Column(scale=3):
+                         t = gr.Textbox(label="Template", placeholder='keyword')
+                         redo = gr.Button(value='Redo - Cancel the last keyword')  # how to bind an event to this button
+                         undo = gr.Button(value='Undo - Clear the canvas')  # how to bind an event to this button
+                         skip_button = gr.Button(value='Skip - Operate next keyword')  # how to bind an event to this button
+
+                 i.select(get_pixels, [i, t], [i])
+                 redo.click(exe_redo, [i, t], [i])
+                 undo.click(exe_undo, [i, t], [i])
+                 skip_button.click(skip_fun, [i, t])
+
+                 # radio = gr.Radio(["Stable Diffusion v2.1", "Stable Diffusion v1.5"], label="Pre-trained Model", value="Stable Diffusion v1.5")
+                 slider_step = gr.Slider(minimum=1, maximum=50, value=20, step=1, label="Sampling step", info="The sampling step for TextDiffuser.")
+                 slider_guidance = gr.Slider(minimum=1, maximum=9, value=7.5, step=0.5, label="Scale of classifier-free guidance", info="The scale of classifier-free guidance, set to 7.5 by default.")
+                 slider_batch = gr.Slider(minimum=1, maximum=4, value=4, step=1, label="Batch size", info="The number of images to be sampled.")
+                 slider_temperature = gr.Slider(minimum=0.1, maximum=2, value=0.7, step=0.1, label="Temperature", info="Controls the diversity of the layout planner. A higher value indicates more diversity.")
+                 # slider_seed = gr.Slider(minimum=1, maximum=10000, label="Seed", randomize=True)
+                 button = gr.Button("Generate")
+
+             with gr.Column(scale=1):
+                 output = gr.Image(label='Generated image')
+
+                 # with gr.Accordion("Intermediate results", open=False):
+                 #     gr.Markdown("Layout, segmentation mask, and details of segmentation mask from left to right.")
+                 #     intermediate_results = gr.Image(label='')
+
+         # gr.Markdown("## Prompt Examples")
+
+         button.click(text_to_image, inputs=[prompt, keywords, slider_step, slider_guidance, slider_batch, slider_temperature], outputs=[output])
+
+
+
+
+     gr.HTML(
+         """
+         <div style="text-align: justify; max-width: 1200px; margin: 20px auto;">
+             <h3 style="font-weight: 450; font-size: 0.8rem; margin: 0rem">
+                 <b>Version</b>: 1.0
+             </h3>
+             <h3 style="font-weight: 450; font-size: 0.8rem; margin: 0rem">
+                 <b>Contact</b>:
+                 For help or issues using TextDiffuser-2, please email Jingye Chen <a href="mailto:qwerty.chen@connect.ust.hk">(qwerty.chen@connect.ust.hk)</a>, Yupan Huang <a href="mailto:huangyp28@mail2.sysu.edu.cn">(huangyp28@mail2.sysu.edu.cn)</a> or submit a GitHub issue. For other communications related to TextDiffuser-2, please contact Lei Cui <a href="mailto:lecu@microsoft.com">(lecu@microsoft.com)</a> or Furu Wei <a href="mailto:fuwei@microsoft.com">(fuwei@microsoft.com)</a>.
+             </h3>
+             <h3 style="font-weight: 450; font-size: 0.8rem; margin: 0rem">
+                 <b>Disclaimer</b>:
+                 Please note that the demo is intended for academic and research purposes <b>ONLY</b>. Any use of the demo for generating inappropriate content is strictly prohibited. The responsibility for any misuse or inappropriate use of the demo lies solely with the users who generated such content, and this demo shall not be held liable for any such use.
+             </h3>
+         </div>
+         """
+     )
+
+
+ demo.launch()
requirements.txt ADDED
@@ -0,0 +1,7 @@
+ setuptools==66.0.0
+ datasets==2.11.0
+ transformers==4.28.1
+ accelerate==0.22.0
+ diffusers==0.24.0
+ fschat==0.2.26
+ pillow==10.1.0
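
For reference, a minimal standalone sketch (not part of the commit) of the line-level layout encoding used by the else branch of text_to_image above: user-placed keywords are serialized into l/t/r/b coordinate tokens on a 128x128 grid plus one [c] token per character. The helper name, sample prompt, keyword, and box below are hypothetical.

def encode_layout(prompt, placements):
    # placements: list of (keyword, (x0, y0, x1, y1)) boxes on the 512x512 canvas
    encoded = prompt + ' <|endoftext|>'
    for text, (x0, y0, x1, y1) in placements:
        # canvas coordinates are divided by 4 to match the 128x128 token grid
        l, t, r, b = x0 // 4, y0 // 4, x1 // 4, y1 // 4
        chars = ' '.join(f'[{c}]' for c in text)  # one token per character
        encoded += f'<|startoftext|> l{l} t{t} r{r} b{b} {chars} <|endoftext|>'
    return encoded

print(encode_layout('A beautiful city skyline stamp of Shanghai', [('Shanghai', (100, 80, 300, 140))]))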