File size: 24,716 Bytes
1cac669
3d5e231
 
 
 
 
 
 
908bed5
3d5e231
 
908bed5
 
3d5e231
908bed5
 
3d5e231
 
 
 
 
 
 
 
 
 
 
 
 
 
908bed5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3d5e231
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
908bed5
77e955b
 
908bed5
3d5e231
e217e55
c9b7beb
 
02b86b7
3d5e231
da4893d
 
 
1cac669
3d5e231
 
908bed5
 
 
3d5e231
 
 
 
 
 
 
908bed5
 
 
 
 
 
 
 
 
 
 
 
3d5e231
 
 
 
 
 
 
 
 
 
908bed5
 
1cac669
 
 
3d5e231
 
 
 
908bed5
 
 
 
 
 
 
 
 
3d5e231
908bed5
 
 
 
3d5e231
908bed5
 
 
3d5e231
 
908bed5
 
3d5e231
 
 
 
 
 
 
 
908bed5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3d5e231
908bed5
 
3d5e231
 
908bed5
 
 
 
3d5e231
908bed5
3d5e231
 
 
 
 
 
 
 
e217e55
3d5e231
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
908bed5
3d5e231
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
908bed5
6e1f143
3d5e231
 
 
908bed5
3d5e231
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
908bed5
3d5e231
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
import os, sys, torch
import gradio as gr
import torchvision.utils as vutils
import torchvision.transforms as transforms
from dalle.models import StoryDalle
import argparse
from PIL import Image
from torchvision.utils import save_image
import tensorflow as tf
import tensorflow_hub as hub
import gdown
from allennlp.predictors.predictor import Predictor
import random

# Inference-only demo: disable autograd globally.
torch.set_grad_enabled(False)
tf.config.set_visible_devices([], 'GPU') # hide GPUs from TensorFlow so the sentence-embedding model runs on CPU

# Dataset-local paths to one representative frame per recurring Pororo character.
# NOTE(review): these absolute paths are not referenced by the visible demo code
# (predict() loads './demo/<character>.png' instead) — confirm still needed.
source_frame_paths = {
    'Pororo': '/playpen-ssd/adyasha/projects/StoryGAN/pororo_png/Pororo_ENGLISH1_2/Pororo_ENGLISH1_2_ep6/12.png',
    'Loopy': '/playpen-ssd/adyasha/projects/StoryGAN/pororo_png/Pororo_ENGLISH1_1/Pororo_ENGLISH1_1_ep12/26.png',
    'Crong': '/playpen-ssd/adyasha/projects/StoryGAN/pororo_png/Pororo_ENGLISH1_1/Pororo_ENGLISH1_1_ep12/10.png',
    'Poby': '/playpen-ssd/adyasha/projects/StoryGAN/pororo_png/Pororo_ENGLISH1_1/Pororo_ENGLISH1_1_ep9/34.png',
    'Eddy': '/playpen-ssd/adyasha/projects/StoryGAN/pororo_png/Pororo_ENGLISH1_1/Pororo_ENGLISH1_1_ep12/46.png',
    'Petty': '/playpen-ssd/adyasha/projects/StoryGAN/pororo_png/Pororo_ENGLISH2_1/Pororo_ENGLISH2_1_ep1/34.png',
    'Tongtong': '/playpen-ssd/adyasha/projects/StoryGAN/pororo_png/Pororo_ENGLISH3_1/Pororo_ENGLISH3_1_ep7/8.png',
    'Rody': '/playpen-ssd/adyasha/projects/StoryGAN/pororo_png/Pororo_ENGLISH3_1/Pororo_ENGLISH3_1_ep6/66.png',
    'Harry': '/playpen-ssd/adyasha/projects/StoryGAN/pororo_png/Pororo_ENGLISH3_1/Pororo_ENGLISH3_1_ep7/39.png',
}


def get_span_words(span, document):
    """Return the tokens of *document* covered by *span* (inclusive end index), space-joined."""
    start, end = span[0], span[1]
    return ' '.join(document[start:end + 1])


def print_clusters(prediction):
    """Print each coreference cluster as '<first mention>: [mention; mention; ...]'."""
    document = prediction['document']
    for cluster in prediction['clusters']:
        # Render every span in the cluster; the first span heads the line.
        spans = [' '.join(document[s[0]:s[1] + 1]) for s in cluster]
        print(f"{spans[0]}: [{'; '.join(spans)}]")


def resolve_coref(captions, captions_mask, coref_predictor):
    """Resolve coreferences across a sequence of captions treated as one document.

    Args:
        captions: list of caption strings, one per frame.
        captions_mask: parallel list of 0/1 flags; 0 marks an unused caption.
        coref_predictor: predictor exposing ``coref_resolved(text) -> str``
            (AllenNLP SpanBERT coref model in this demo).

    Returns:
        List of captions (same length and order) with pronouns replaced by
        their antecedents; masked positions come back as ''.
    """
    # Count sentences per caption so the resolved document can be split back
    # into per-caption chunks. NOTE: assumes sentences are '.'-delimited.
    sent_counts = []
    doc = ''
    for cap, mask in zip(captions, captions_mask):
        if mask == 0:
            sent_counts.append(0)
        else:
            count = len([c.strip() for c in cap.split('.') if c.strip()])
            sent_counts.append(count)
            doc += cap + ' '

    resolved_doc = coref_predictor.coref_resolved(doc.strip())

    sents = resolved_doc.split('. ')
    resolved_captions = []
    for i, (count, mask) in enumerate(zip(sent_counts, captions_mask)):
        if mask == 0:
            resolved_captions.append('')
        else:
            start = sum(sent_counts[:i])
            new_cap = '. '.join(sents[start:start + count]).strip()
            # Bug fix: guard against an empty chunk before indexing [-1]
            # (the original raised IndexError when resolution returned
            # fewer sentences than expected).
            if new_cap and new_cap[-1] not in ['!', '?', '.']:
                new_cap += '.'
            resolved_captions.append(new_cap)

    return resolved_captions


def inverse_normalize(tensor, mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)):
    """Undo a channel-wise Normalize transform IN PLACE and return the tensor.

    Inverts ``(x - mean) / std`` via ``x * std + mean``. The input tensor is
    modified in place (``mul_``/``add_``) and also returned for convenience.
    """
    m = torch.as_tensor(mean, dtype=tensor.dtype, device=tensor.device)
    s = torch.as_tensor(std, dtype=tensor.dtype, device=tensor.device)
    # Broadcast 1-D per-channel stats over (C, H, W).
    if m.ndim == 1:
        m = m.view(-1, 1, 1)
    if s.ndim == 1:
        s = s.view(-1, 1, 1)
    return tensor.mul_(s).add_(m)


def save_story_results(images, video_len=4, n_candidates=1, mask=None):
    """Arrange generated story frames into a single grid image tensor.

    Args:
        images: sequence (batch) indexable as images[i][j][k] -> one [C, H, W]
            frame, for batch item i, candidate j, frame k.
        video_len: number of frames per story.
        n_candidates: number of candidate stories per input.
        mask: optional 0/1 list per frame; frames with mask 0 are dropped and
            replaced by white padding on the right.

    Returns:
        A single [C, H, W] tensor with the outer grid margin cropped off.
    """
    if mask is None:
        # Bug fix: video_len is an int — the original called len(video_len),
        # which raised TypeError whenever mask was omitted.
        mask = [1 for _ in range(video_len)]

    # One row per (batch item, candidate): the unmasked frames side by side.
    all_images = []
    for i in range(len(images)):  # batch size = 1 in the demo
        for j in range(n_candidates):
            story = [images[i][j][k] for k, m in enumerate(mask) if m == 1]
            all_images.append(vutils.make_grid(story, sum(mask), padding=0))
    # Stack candidate rows vertically with a 20px gutter.
    all_images = vutils.make_grid(all_images, 1, padding=20)

    pad_len = video_len - sum(mask)
    if pad_len > 0:
        # Pad masked-out frames with white space so the canvas keeps a fixed
        # width. NOTE(review): assumes 256x256 frames — confirm against config.
        pad_height = 256 * n_candidates + 20 * (n_candidates + 1)
        pad_width = 256 * pad_len + 20 * pad_len
        pad_image = torch.ones(3, pad_height, pad_width)
        all_images = torch.cat([all_images[:, :, :-15], pad_image], dim=-1)

    # Trim the outer 15px margin added by make_grid's padding.
    return all_images[:, 15:-15, 15:-15]


def main(args):
    """Load the StoryDALL-E model and companions, then launch the Gradio demo.

    Args:
        args: parsed argparse namespace (see the __main__ block); notably
            ``args.debug`` skips model loading and ``args.split_memory``
            splits the model between CPU and GPU.
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Reference image embedded in the demo description below.
    png_url = 'https://drive.google.com/u/1/uc?id=1C33A1IzSHDPoQ4QBsgFWbF61QWaAxRo_&export=download'
    gdown.download(png_url, quiet=True, use_cookies=False, output="demo_pororo_good.png")

    # SpanBERT-based coreference model (AllenNLP), used to resolve pronouns
    # across the four captions before generation.
    coref_model_url = 'https://storage.googleapis.com/allennlp-public-models/coref-spanbert-large-2020.02.27.tar.gz'
    coref_predictor = Predictor.from_path(coref_model_url)

    if args.debug:
        # Debug mode: no models are loaded; predict() returns a canned image.
        model = None
        embed = None
    else:
        model, config = StoryDalle.from_pretrained(args)
        model.tokenizer.add_tokens(['pororo', 'loopy', 'eddy', 'harry', 'poby', 'tongtong', 'crong', 'rody', 'petty'])
        model.eval()
        if args.split_memory:
            # Keep only the generation-side modules on the GPU; the rest stays on CPU.
            model.stage2.to(device=device)
            model.story_linear.to(device=device)
            model.story_block.to(device=device)
        else:
            model.to(device=device)
            if model.config.story.condition:
                # Cross-attention layers live outside the main module tree and
                # must be moved to the device explicitly.
                for i in range(len(model.cross_attention_layers)):
                    model.cross_attention_layers[i].to(device)
                print("Cross-attention layers are in cuda:", next(model.cross_attention_layers[0].parameters()).is_cuda)

        # Universal Sentence Encoder for per-caption sentence embeddings
        # (TensorFlow is pinned to CPU at module import time).
        embed = hub.load("https://tfhub.dev/google/universal-sentence-encoder-large/5")

        valid_transform = transforms.Compose(
            [transforms.Resize(config.dataset.image_resolution),
             transforms.CenterCrop(config.dataset.image_resolution),
             transforms.ToTensor(),
             transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]
        )

        print("Model is in ", model.device)

    def predict(caption_1, caption_2, caption_3, caption_4, source='Pororo', top_k=32, top_p=0.2, n_candidates=4,
                supercondition=False):
        """Generate a visual story for up to four captions; return the image path."""
        if args.debug:
            return "gradio_demo_pororo.png"

        suffix = random.randint(0, 1000)
        img_file_path = "./demo/images/gradio_demo_pororo_%s.png" % suffix
        txt_file_path = "./demo/texts/gradio_demo_pororo_%s.txt" % suffix
        # Make sure the output directories exist before writing.
        os.makedirs(os.path.dirname(img_file_path), exist_ok=True)
        os.makedirs(os.path.dirname(txt_file_path), exist_ok=True)

        captions = [caption_1.strip(), caption_2.strip(), caption_3.strip(), caption_4.strip()]
        # Empty captions are masked out. Bug fix: compute the mask first and
        # skip empty strings when appending terminal punctuation — the
        # original indexed caption[-1] on '' and raised IndexError whenever
        # fewer than four captions were entered.
        mask = [1 if caption != '' else 0 for caption in captions]
        for i, caption in enumerate(captions):
            if caption and caption[-1] not in ['!', '?', '.']:
                captions[i] = caption + '.'

        with open(txt_file_path, 'w') as f:
            f.write('\n'.join(captions))

        print(captions, mask, source, n_candidates)
        captions = resolve_coref(captions, mask, coref_predictor)
        print(captions)

        for i, caption in enumerate(captions):
            if caption == "":
                captions[i] = "Pororo is reading a book." # filler for shorter captions

        tokens = [model.tokenizer.encode(caption) for caption in captions]
        texts = torch.stack([torch.LongTensor(token.ids) for token in tokens]).unsqueeze(0)
        sent_embeds = torch.tensor(embed(captions).numpy())
        src_image = valid_transform(Image.open('./demo/%s.png' % source).convert('RGB'))

        stories = []
        with torch.no_grad():
            for i in range(texts.shape[0]):
                pixels = model.sampling_batch(texts[i].to(device), src_image.unsqueeze(0).to(device),
                                              sent_embeds.unsqueeze(0).to(device), top_k=top_k, top_p=top_p,
                                              prompt=None, n_candidates=n_candidates).cpu()
                stories.append(pixels)
        img = save_story_results(stories, video_len=4, n_candidates=n_candidates, mask=mask)
        save_image(img, img_file_path, normalize=True)
        return img_file_path

    with gr.Blocks(css='#output {width:750px; height:750px; float:left;}') as demo:
        gr.Markdown('''
        <p style="text-align: center;font-size:40px;"><b>StoryDALL-E: Adapting Pretrained Text-to-Image Transformers for Story Continuation</b><br><font size="6">Adyasha Maharana, Darryl Hannan and Mohit Bansal (UNC Chapel Hill)<br>Published at <b>ECCV 2022</b></font></p>

        StoryDALL-E \[1\] is a model trained for the task of Story Visualization \[2\].
        The model receives a sequence of captions as input and generates a corresponding sequence of images which form a visual story depicting the narrative in the captions. 
        We modify this task to enable the model to receive an initial scene as input, which can be used as a cue for the setting of the story and also for generating unseen or low-resource visual elements. We refer to this task as Story Continuation \[1\].
        StoryDALL-E is based on the [dalle](https://github.com/kakaobrain/minDALL-E) model.
        **This model has been developed for academic purposes only.**

        \[[Paper](http://arxiv.org/abs/2209.06192)\]  \[[Code](https://github.com/adymaharana/storydalle)\] \[[Model Card](https://github.com/adymaharana/storydalle/blob/main/MODEL_CARD.MD)\]

        ### Dataset
        This model has been trained using the Pororo story visualization dataset \[1\].
        The data was adapted from the popular cartoon series *Pororo the Little Penguin* and originally released by \[2\].
        The Pororo dataset contains 9 recurring characters, as shown below, in the decreasing order of their frequency in the training data.
        <p align="center">
            <img src="file/pororo_characters.png" width="800">
        </p>
        The training dataset contains nearly 10,000 samples in the training set. Most of the scenes occur in a snowy village, surrounded by hills, trees and houses. A few episodes are located in gardens or water bodies. All the captions are in the English language and predominantly contain verbs in the present tense. Additionally, the training of this model starts from the pretrained checkpoint of mega-dalle, which is trained on the Conceptual Captions dataset.

        ### Intended Use
        This model is intended for generating visual stories containing the 9 characters in the Pororo dataset. This version of the StoryDALL-E model is reasonable at the following scenarios:
        * Frames containing a single character.
        * Overtly visual actions such as *making cookies*, *walking*, *reading a book*, *sitting*.
        * Scenes taking place in snowy settings, indoors and gardens.
        * Visual stories contaning 1-3 characters across all frames.
        * Scene transitions e.g. from day to night.
        * Moderately capable of generating semantic concepts that do not appear in the story continuation dataset, such as *doughnut* and *lion*.

        Here are some examples of generated visual stories for the above-mentioned settings.

        <p align="center">
            <img src="file/demo_pororo_good_v1.png" width="1000">
        </p>

        Due to the small training dataset size for story visualization, the model has poor generalization to some unseen settings. The model struggles to generate coherent images in the following scenarios.
        * Multiple characters in a frame.
        * Non-visual actions such as *compliment*.
        * Characters that are infrequent in the training dataset e.g. Rody, Harry.
        * Background locations that are not found in the cartoon e.g. a busy city.
        * Color-based descriptions for object.
        * Completely new characters based on textual descriptions.

        In the following demo, four or less captions can be entered in the `caption` text fields for the visual story. 
        Select a `source` frame based on the character that is predominant in your visual story.
        `top_k` refers to the number of highest probability vocabulary tokens to keep for top-k-filtering.
        Only the most probable tokens with probabilities that add up to `top_p` or higher are kept for generation.
        Set `supercondition` to True to enable generation using a null hypothesis. 
        Select between 1-4 `n_candidates` to generate a diverse set of stories for the given captions.
        <br><br>
        Feel free to send feedback to adyasha@cs.unc.edu.
        ''')

        with gr.Row():
            with gr.Column():
                caption_1 = gr.Textbox(label="Caption 1", value='Pororo is reading a book.')
                caption_2 = gr.Textbox(label="Caption 2", value='Pororo is sleeping on the couch.')
                caption_3 = gr.Textbox(label="Caption 3", value='Pororo wakes up in the middle of the night in his bed.')
                caption_4 = gr.Textbox(label="Caption 4", value='Pororo is in his bedroom and looks terrified.')
                source = gr.Radio(["Pororo", "Loopy", "Crong", "Poby", "Eddy", "Petty", "Tongtong", "Rody", "Harry"],
                                  label="Source", value="Pororo")
                top_k = gr.Slider(16, 128, label="top_k", value=32)
                top_p = gr.Slider(0.01, 1.0, label="top_p", value=0.2)
                supercondition = gr.Checkbox(value=False, label='supercondition')
                n_candidates = gr.Dropdown([1, 2, 3, 4], value=4, label='n_candidates')

                with gr.Row():
                    submit_btn = gr.Button("Submit")

            with gr.Column():
                with gr.Row():
                    frame_1_label = gr.Button("Frame 1")
                    frame_2_label = gr.Button("Frame 2")
                    frame_3_label = gr.Button("Frame 3")
                    frame_4_label = gr.Button("Frame 4")
                output = gr.Image(label="", elem_id='output')

        submit_btn.click(fn=predict,
                         inputs=[caption_1, caption_2, caption_3, caption_4, source, top_k, top_p, n_candidates,
                                 supercondition], outputs=output)

        gr.Markdown('''
        ### References

        \[1\] Maharana, Adyasha, et al. "StoryDALL-E: Adapting Pretrained Text-to-Image Transformers for Story Continuation." ECCV. 2022.

        \[2\] Li, Yitong, et al. "Storygan: A sequential conditional gan for story visualization." Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 2019.

        \[3\] Kim, Kyung-Min, et al. "DeepStory: video story QA by deep embedded memory networks." Proceedings of the 26th International Joint Conference on Artificial Intelligence. 2017.

        \[4\] Sharma, Piyush, et al. "Conceptual captions: A cleaned, hypernymed, image alt-text dataset for automatic image captioning." Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers). 2018.
        ''')

    # Bug fix: the original called demo.launch() a second time right after
    # this (blocking) call — the duplicate was unreachable and has been removed.
    demo.launch(share=False)


if __name__ == "__main__":

    # NOTE: arguments are hard-coded for the demo via args_list and passed to
    # parse_args() below, so the real command line (sys.argv) is ignored.
    args_list = ['--model_name_or_path', './ckpt/25.pth',
                 '--prefix_model_name_or_path', './1.3B/',
                 '--dataset_name', 'pororo',
                 '--tuning_mode', 'story',
                 '--preseqlen', '32',
                 '--condition',
                 '--story_len', '4',
                 '--sent_embed', '512',
                 '--prefix_dropout', '0.2',
                 '--data_dir', '/playpen-ssd/adyasha/projects/StoryGAN/pororo_png/',
                 '--dataloader_num_workers', '1',
                 '--do_eval',
                 '--per_gpu_eval_batch_size', '16',
                 '--mode', 'story']

    parser = argparse.ArgumentParser(description='arguments for training/evaluating prefix-tuning DALLE')

    # Model Arguments
    parser.add_argument('--model_name_or_path', type=str, default=None,
                        help='The model checkpoint for weights initialization.')
    parser.add_argument('--prefix_model_name_or_path', type=str, default=None,
                        help='The prefix model checkpoint for weights initialization.')
    parser.add_argument('--prefix_mode', type=str, default='activation', help='activation or embedding')
    parser.add_argument('--preseqlen', type=int, default=0, help='how many tokens of prefix should we include.')
    parser.add_argument('--optim_prefix', action="store_true",
                        help='set to True if optimizing prefix directly; no if through amortized function')
    parser.add_argument('--tuning_mode', type=str, default='prefixtune', help='prefixtune or finetune')
    parser.add_argument('--top_k_layers', type=int, default=2,
                        help='In finetuning setting, if we only tune the top k layers.')
    parser.add_argument('--parameterize_mode', type=str, default='mlp',
                        help="mlp or emb to parametrize when we optimize for the embeddings.")
    parser.add_argument('--prefix_dropout', type=float, default=0.0, help='dropout rate for the prefix tuning model.')
    parser.add_argument('--teacher_dropout', type=float, default=0.0, help='dropout rate for the teacher model.')
    parser.add_argument('--init_random', action="store_true", help="set True if initializing random embeddings")
    parser.add_argument('--init_shallow', action="store_true", help="set True if not using reparameterization")
    parser.add_argument('--init_shallow_word', type=bool, default=False,
                        help="set True if init_shallow and specify words")
    parser.add_argument('--replay_buffer', action="store_true", help="set True if using replay buffer in training")
    parser.add_argument('--gumbel', action="store_true", help="set True if using the gumbel softmax in training")
    # NOTE(review): type=float for a hidden dimension looks like an oversight —
    # left unchanged since downstream consumers are not visible here.
    parser.add_argument('--hidden_dim_prefix', type=float, default=512, help="hidden dim of MLP for generating prefix?")

    # Data Arguments
    parser.add_argument('--dataset_name', type=str, default='pororo', help="dataset name")
    parser.add_argument('--data_dir', type=str, default=None, help="Path to data directory")
    parser.add_argument('--lowdata_token', type=str, default='story',
                        help="The token to be prepended at initialization time.")
    parser.add_argument('--use_lowdata_token', type=bool, default=True,
                        help="Whether we should use the lowdata token for prefix-tuning")
    parser.add_argument('--train_embeddings', action="store_true", help="Whether to train word embeddings")
    parser.add_argument('--train_max_target_length', type=int, default=100,
                        help='the max target length for training data.')
    parser.add_argument('--val_max_target_length', type=int, default=100, help='the max target length for dev data.')
    parser.add_argument('--dataloader_num_workers', type=int, default=8, help='number of workers when loading data')

    # new arguments for story
    parser.add_argument('--prompt', action="store_true", help="set True if using prompts in StoryDALLE")
    # Fixed copy-pasted help strings below ('the max target length for dev data.').
    parser.add_argument('--story_len', type=int, default=4, help='number of frames per story.')
    parser.add_argument('--sent_embed', type=int, default=384, help='dimension of the sentence embeddings.')
    parser.add_argument('--condition', action="store_true", help="set True if using prompts in StoryDALLE")
    parser.add_argument('--clip_embed', action="store_true", help="set True if using prompts in StoryDALLE")

    # Training Arguments
    parser.add_argument('--output_dir', type=str, default=None, help="Path to data directory")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run evaluation.")
    parser.add_argument("--do_test", action="store_true", help="Whether to run test.")
    parser.add_argument('--seed', type=int, default=42, help='seed for reproducibility')
    parser.add_argument("--overwrite_output_dir", action="store_true", help="Whether to overwrite output dir.")
    parser.add_argument("--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.")
    parser.add_argument(
        "--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation."
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )

    # Typo fix: help previously read "mval or test."
    parser.add_argument('--mode', type=str, default='val', help="val or test.")

    parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
    # Typo fix: help previously read "Weight deay".
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
    parser.add_argument(
        "--num_train_epochs", default=3, type=int, help="Total number of training epochs to perform."
    )
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
    )
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    parser.add_argument("--logging_steps", type=int, default=50, help="Log every X updates steps.")
    parser.add_argument("--save_steps", type=int, default=50, help="Save checkpoint every X updates steps.")
    parser.add_argument(
        "--eval_all_checkpoints",
        action="store_true",
        help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
    )
    parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available")
    parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )

    parser.add_argument("--debug", action="store_true", help="Whether to debug the demo.")
    parser.add_argument("--split_memory", action="store_true", help="Whether to split the model into GPU & CPU in the demo.")

    args = parser.parse_args(args_list)

    main(args)