bipin committed on
Commit 32613f0
Parent(s): cae4936

removed unused imports and added more details

Files changed (3)
  1. app.py +8 -1
  2. dog_image.jpg +0 -0
  3. prefix_clip.py +1 -9
app.py CHANGED
@@ -19,9 +19,15 @@ def main(pil_image, genre, model="Conceptual", use_beam_search=True):
 
 
 if __name__ == "__main__":
+    title = "Image to Story"
+    article = "Combines the power of [clip prefix captioning](https://github.com/rmokady/CLIP_prefix_caption) with [gpt2 story generator](https://huggingface.co/pranavpsv/genre-story-generator-v2) to create stories of different genres from image"
+    description = "Drop an image and generate stories of different genre based on that image"
+
     interface = gr.Interface(
         main,
-        title="image2story",
+        title=title,
+        description=description,
+        article=article,
         inputs=[
             gr.inputs.Image(type="pil", source="upload", label="Input"),
             gr.inputs.Dropdown(
@@ -38,6 +44,7 @@ if __name__ == "__main__":
             ),
         ],
         outputs=gr.outputs.Textbox(label="Generated story"),
+        examples=[["dog_image.jpg", "action"]],
         enable_queue=True,
     )
     interface.launch()
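
For context, the new keyword arguments map onto fixed slots in Gradio's page layout: title becomes the heading, description sits under it, article is rendered (Markdown links included) below the interface, and each inner list in examples pre-fills one clickable example row matching the inputs in order. Below is a minimal self-contained sketch of the same pattern, assuming the Gradio 2.x-era API that app.py already uses (gr.inputs / gr.outputs, enable_queue); the predict function and the dropdown choices are placeholders, not the app's real pipeline.

# Minimal sketch, not the app's real code: placeholder predict function and
# dropdown choices, using the Gradio 2.x-era API seen in app.py.
import gradio as gr

def predict(pil_image, genre):
    # stand-in for the real caption-then-generate pipeline
    return f"A {genre} story about the uploaded image..."

interface = gr.Interface(
    predict,
    title="Image to Story",                    # page heading
    description="Drop an image, get a story",  # subtitle under the heading
    article="Rendered below the UI; Markdown links work here",
    inputs=[
        gr.inputs.Image(type="pil", source="upload", label="Input"),
        gr.inputs.Dropdown(["action", "romance"], label="Genre"),  # placeholder genres
    ],
    outputs=gr.outputs.Textbox(label="Generated story"),
    examples=[["dog_image.jpg", "action"]],  # one row: image path + genre value
    enable_queue=True,
)
interface.launch()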
dog_image.jpg ADDED
prefix_clip.py CHANGED
@@ -1,23 +1,15 @@
 import clip
-import os
 from torch import nn
 import numpy as np
 import torch
 import torch.nn.functional as nnf
-import sys
 import gdown
 from typing import Tuple, List, Union, Optional
 from transformers import (
     GPT2Tokenizer,
     GPT2LMHeadModel,
-    AdamW,
-    get_linear_schedule_with_warmup,
 )
-from tqdm import tqdm, trange
-from google.colab import files
-import skimage.io as io
-import PIL.Image
-from IPython.display import Image
+from tqdm import trange
 
 
 N = type(None)
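
One import slims down rather than disappearing: the notebook-oriented imports (google.colab, skimage, PIL, IPython) go entirely, while the tqdm import shrinks to just trange, presumably because the text-generation loop still wants a progress bar. As a reminder of what trange provides, here is a hedged sketch; the loop body is a placeholder, not prefix_clip.py's actual generation code.

from tqdm import trange

tokens = []
for step in trange(20):   # trange(n) is a tqdm-wrapped range(n): shows a progress bar
    tokens.append(step)   # real code would append the next predicted token here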