import os, sys, torch import gradio as gr import torchvision.utils as vutils import torchvision.transforms as transforms from dalle.models import StoryDalle import argparse from PIL import Image from torchvision.utils import save_image import tensorflow as tf import tensorflow_hub as hub import gdown from allennlp.predictors.predictor import Predictor import random torch.set_grad_enabled(False) tf.config.set_visible_devices([], 'GPU') # setting Tensorflow's GPU visibility to None to constraing embedding model to CPU source_frame_paths = { 'Pororo': '/playpen-ssd/adyasha/projects/StoryGAN/pororo_png/Pororo_ENGLISH1_2/Pororo_ENGLISH1_2_ep6/12.png', 'Loopy': '/playpen-ssd/adyasha/projects/StoryGAN/pororo_png/Pororo_ENGLISH1_1/Pororo_ENGLISH1_1_ep12/26.png', 'Crong': '/playpen-ssd/adyasha/projects/StoryGAN/pororo_png/Pororo_ENGLISH1_1/Pororo_ENGLISH1_1_ep12/10.png', 'Poby': '/playpen-ssd/adyasha/projects/StoryGAN/pororo_png/Pororo_ENGLISH1_1/Pororo_ENGLISH1_1_ep9/34.png', 'Eddy': '/playpen-ssd/adyasha/projects/StoryGAN/pororo_png/Pororo_ENGLISH1_1/Pororo_ENGLISH1_1_ep12/46.png', 'Petty': '/playpen-ssd/adyasha/projects/StoryGAN/pororo_png/Pororo_ENGLISH2_1/Pororo_ENGLISH2_1_ep1/34.png', 'Tongtong': '/playpen-ssd/adyasha/projects/StoryGAN/pororo_png/Pororo_ENGLISH3_1/Pororo_ENGLISH3_1_ep7/8.png', 'Rody': '/playpen-ssd/adyasha/projects/StoryGAN/pororo_png/Pororo_ENGLISH3_1/Pororo_ENGLISH3_1_ep6/66.png', 'Harry': '/playpen-ssd/adyasha/projects/StoryGAN/pororo_png/Pororo_ENGLISH3_1/Pororo_ENGLISH3_1_ep7/39.png', } def get_span_words(span, document): return ' '.join(document[span[0]:span[1]+1]) def print_clusters(prediction): document, clusters = prediction['document'], prediction['clusters'] for cluster in clusters: print(get_span_words(cluster[0], document) + ': ', end='') print(f"[{'; '.join([get_span_words(span, document) for span in cluster])}]") def resolve_coref(captions, captions_mask, coref_predictor): sent_counts = [] doc = '' for cap, mask in zip(captions, 
captions_mask): if mask == 0: sent_counts.append(0) else: print(cap) count = len([c.strip() for c in cap.split('.') if c.strip()]) sent_counts.append(count) doc += cap + ' ' # print(doc) doc = doc.strip() resolved_doc = coref_predictor.coref_resolved(doc) # print(resolved_doc) # print(sent_counts) sents = resolved_doc.split('. ') resolved_captions = [] for i, (count, mask) in enumerate(zip(sent_counts, captions_mask)): if mask == 0: resolved_captions.append('') else: new_cap = '. '.join(sents[sum(sent_counts[:i]):sum(sent_counts[:i]) + count]) new_cap = new_cap.strip() if new_cap[-1] not in ['!', '?', '.']: new_cap += '.' resolved_captions.append(new_cap) return resolved_captions def inverse_normalize(tensor, mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)): mean = torch.as_tensor(mean, dtype=tensor.dtype, device=tensor.device) std = torch.as_tensor(std, dtype=tensor.dtype, device=tensor.device) if mean.ndim == 1: mean = mean.view(-1, 1, 1) if std.ndim == 1: std = std.view(-1, 1, 1) tensor.mul_(std).add_(mean) return tensor def save_story_results(images, video_len=4, n_candidates=1, mask=None): # print("Generated Images shape: ", images.shape) if mask is None: mask = [1 for _ in range(len(video_len))] all_images = [] for i in range(len(images)): # batch size = 1 for j in range(n_candidates): story = [] for k, m in enumerate(mask): if m == 1: story.append(images[i][j][k]) all_images.append(vutils.make_grid(story, sum(mask), padding=0)) all_images = vutils.make_grid(all_images, 1, padding=20) print(all_images) pad_len = video_len - sum(mask) if pad_len > 0: pad_height = 256 * n_candidates + 20 * (n_candidates + 1) pad_width = 256 * pad_len + 20 * (pad_len) pad_image = torch.ones(3, pad_height, pad_width) print(all_images.shape, pad_image.shape) all_images = torch.cat([all_images[:, :, :-15], pad_image], dim=-1) print(all_images.shape) return all_images[:, 15:-15, 15:-15] def main(args): #device = 'cuda:0' device = torch.device('cuda' if torch.cuda.is_available() else 
'cpu') # device = torch.device('cpu') model_url = 'https://drive.google.com/u/1/uc?id=1KAXVtE8lEE2Yc83VY7w6ycOOMkdWbmJo&export=sharing' #model_url = 'https://drive.google.com/u/1/uc?id=1lJ6zMZ6qTvFu6H35-VEdFlN13MMslivJ&export=download' png_url = 'https://drive.google.com/u/1/uc?id=1C33A1IzSHDPoQ4QBsgFWbF61QWaAxRo_&export=download' #if not os.path.exists("./ckpt/25.pth"): # gdown.download(model_url, quiet=False, use_cookies=False, output="./ckpt/25.pth") # print("Downloaded checkpoint") #assert os.path.exists("./ckpt/25.pth") gdown.download(png_url, quiet=True, use_cookies=False, output="demo_pororo_good.png") coref_model_url = 'https://storage.googleapis.com/allennlp-public-models/coref-spanbert-large-2020.02.27.tar.gz' coref_predictor = Predictor.from_path(coref_model_url) if args.debug: model = None embed = None else: model, config = StoryDalle.from_pretrained(args) model.tokenizer.add_tokens(['pororo', 'loopy', 'eddy', 'harry', 'poby', 'tongtong', 'crong', 'rody', 'petty']) model.eval() # split_model into CPU and GPU if args.split_memory: model.stage2.to(device=device) model.story_linear.to(device=device) model.story_block.to(device=device) else: model.to(device=device) if model.config.story.condition: for i in range(len(model.cross_attention_layers)): model.cross_attention_layers[i].to(device) print("Cross-attention layers are in cuda:", next(model.cross_attention_layers[0].parameters()).is_cuda) embed = hub.load("https://tfhub.dev/google/universal-sentence-encoder-large/5") valid_transform = transforms.Compose( [transforms.Resize(config.dataset.image_resolution), transforms.CenterCrop(config.dataset.image_resolution), transforms.ToTensor(), transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])] ) print("Model is in ", model.device) #torch.save(model, './ckpt/checkpoint.pt') #sys.exit() def predict(caption_1, caption_2, caption_3, caption_4, source='Pororo', top_k=32, top_p=0.2, n_candidates=4, supercondition=False): if not args.debug: suffix = 
random.randint(0, 1000) img_file_path = "./demo/images/gradio_demo_pororo_%s.png" % suffix txt_file_path = "./demo/texts/gradio_demo_pororo_%s.txt" % suffix captions = [caption_1.strip(), caption_2.strip(), caption_3.strip(), caption_4.strip()] for i in range(len(captions)): if captions[i][-1] not in ['!', '?', '.']: captions[i] = captions[i] + '.' mask = [1 if caption != '' else 0 for caption in captions] with open(txt_file_path, 'w') as f: f.write('\n'.join(captions)) print(captions, mask, source, n_candidates) captions = resolve_coref(captions, mask, coref_predictor) print(captions) for i, caption in enumerate(captions): if caption == "": captions[i] = "Pororo is reading a book." # filler for shorter captions tokens = [model.tokenizer.encode(caption) for caption in captions] texts = torch.stack([torch.LongTensor(token.ids) for token in tokens]).unsqueeze(0) sent_embeds = torch.tensor(embed(captions).numpy()) src_image = valid_transform(Image.open('./demo/%s.png' % source).convert('RGB')) stories = [] with torch.no_grad(): for i in range(texts.shape[0]): candidates = [] # for _ in range(n_candidates): # if args.split_memory: # if splitting model into CPU/GPU, send src_image from CPU memory # pixels = model.sampling_batch(texts[i].to(device), src_image.unsqueeze(0), # sent_embeds.unsqueeze(0).to(device), top_k=top_k, top_p=top_p, # prompt=None, n_candidates=1, device=device).cpu() # else: # pixels = model.sampling_batch(texts[i].to(device), src_image.unsqueeze(0).to(device), # sent_embeds.unsqueeze(0).to(device), top_k=top_k, top_p=top_p, # prompt=None, n_candidates=1).cpu() # print(pixels.shape) # candidates.append(pixels.squeeze()) # stories.append(torch.stack(candidates)) #with torch.cuda.amp.autocast(): pixels = model.sampling_batch(texts[i].to(device), src_image.unsqueeze(0).to(device), sent_embeds.unsqueeze(0).to(device), top_k=top_k, top_p=top_p, prompt=None, n_candidates=n_candidates).cpu() stories.append(pixels) img = save_story_results(stories, 
video_len=4, n_candidates=n_candidates, mask=mask) save_image(img, img_file_path, normalize=True) else: img_file_path = "gradio_demo_pororo.png" return img_file_path with gr.Blocks(css='#output {width:750px; height:750px; float:left;}') as demo: gr.Markdown('''
StoryDALL-E: Adapting Pretrained Text-to-Image Transformers for Story Continuation
Adyasha Maharana, Darryl Hannan and Mohit Bansal (UNC Chapel Hill)
Published at ECCV 2022
The training dataset contains nearly 10,000 samples. Most of the scenes occur in a snowy village, surrounded by hills, trees and houses. A few episodes are located in gardens or water bodies. All the captions are in the English language and predominantly contain verbs in the present tense. Additionally, the training of this model starts from the pretrained checkpoint of mega-dalle, which is trained on the Conceptual Captions dataset. ### Intended Use This model is intended for generating visual stories containing the 9 characters in the Pororo dataset. This version of the StoryDALL-E model is reasonable at the following scenarios: * Frames containing a single character. * Overtly visual actions such as *making cookies*, *walking*, *reading a book*, *sitting*. * Scenes taking place in snowy settings, indoors and gardens. * Visual stories containing 1-3 characters across all frames. * Scene transitions e.g. from day to night. * Moderately capable of generating semantic concepts that do not appear in the story continuation dataset, such as *doughnut* and *lion*. Here are some examples of generated visual stories for the above-mentioned settings.
Due to the small training dataset size for story visualization, the model has poor generalization to some unseen settings. The model struggles to generate coherent images in the following scenarios. * Multiple characters in a frame. * Non-visual actions such as *compliment*. * Characters that are infrequent in the training dataset e.g. Rody, Harry. * Background locations that are not found in the cartoon e.g. a busy city. * Color-based descriptions for objects. * Completely new characters based on textual descriptions. In the following demo, four or fewer captions can be entered in the `caption` text fields for the visual story. Select a `source` frame based on the character that is predominant in your visual story. `top_k` refers to the number of highest probability vocabulary tokens to keep for top-k-filtering. Only the most probable tokens with probabilities that add up to `top_p` or higher are kept for generation. Set `supercondition` to True to enable generation using a null hypothesis. Select between 1-4 `n_candidates` to generate a diverse set of stories for the given captions.