from transformers import ViTConfig, ViTForImageClassification
from transformers import ViTFeatureExtractor
from PIL import Image
import requests
import matplotlib.pyplot as plt
import gradio as gr
from gradio.mix import Parallel  # gradio.mix (Parallel/Series) is only available in older Gradio releases
from transformers import ImageClassificationPipeline, PerceiverForImageClassificationConvProcessing, PerceiverFeatureExtractor
from transformers import VisionEncoderDecoderModel
from transformers import AutoTokenizer
import torch
from transformers import (
    AutoModelForCausalLM,
    LogitsProcessorList,
    MinLengthLogitsProcessor,
    StoppingCriteriaList,
    MaxLengthCriteria,
)

# https://github.com/NielsRogge/Transformers-Tutorials/blob/master/HuggingFace_vision_ecosystem_overview_(June_2022).ipynb
# option 1: load with randomly initialized weights (train from scratch)

#tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B")
#model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")


# note: these randomly initialized weights are never used; the Perceiver checkpoint loaded below replaces them
config = ViTConfig(num_hidden_layers=12, hidden_size=768)
model = ViTForImageClassification(config)

#print(config)

feature_extractor = ViTFeatureExtractor()
# or, to load one that corresponds to a checkpoint on the hub:
#feature_extractor = ViTFeatureExtractor.from_pretrained("google/vit-base-patch16-224")

# the pipeline built below is what classify_image() calls
feature_extractor = PerceiverFeatureExtractor.from_pretrained("deepmind/vision-perceiver-conv")
model = PerceiverForImageClassificationConvProcessing.from_pretrained("deepmind/vision-perceiver-conv")
# alternative checkpoints: google/vit-base-patch16-224, deepmind/vision-perceiver-conv
image_pipe = ImageClassificationPipeline(model=model, feature_extractor=feature_extractor)

def create_story(text_seed):
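  """Continue `text_seed` with GPT-Neo 125M using greedy decoding; returns the decoded text as a list of strings."""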
  #tokenizer = AutoTokenizer.from_pretrained("gpt2")
  #model = AutoModelForCausalLM.from_pretrained("gpt2")
  
  # EleutherAI GPT-Neo 125M (a GPT-3-style causal language model)
  tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neo-125M")
  model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-neo-125M")

  # GPT-Neo (like GPT-2) has no PAD token, so reuse the EOS token id for padding
  model.config.pad_token_id = model.config.eos_token_id

  #input_prompt = "It might be possible to"
  input_prompt = text_seed
  input_ids = tokenizer(input_prompt, return_tensors="pt").input_ids

  # instantiate logits processors
  logits_processor = LogitsProcessorList(
    [
        MinLengthLogitsProcessor(10, eos_token_id=model.config.eos_token_id),
    ]
  )
  stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=100)])

  # explicit greedy decoding; model.generate(..., do_sample=False, num_beams=1) is equivalent
  outputs = model.greedy_search(
    input_ids, logits_processor=logits_processor, stopping_criteria=stopping_criteria
  )

  result_text = tokenizer.batch_decode(outputs, skip_special_tokens=True)
  return result_text


def self_caption(image):
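  """Caption `image` with the ViT-GPT2 encoder-decoder, then expand the caption into a story via create_story()."""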
  repo_name = "ydshieh/vit-gpt2-coco-en"
  #test_image = "cats.jpg"
  test_image = image
  #url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
  #test_image = Image.open(requests.get(url, stream=True).raw)
  #test_image.save("cats.png")
  
  feature_extractor2 = ViTFeatureExtractor.from_pretrained(repo_name)
  tokenizer = AutoTokenizer.from_pretrained(repo_name)
  model2 = VisionEncoderDecoderModel.from_pretrained(repo_name)
  pixel_values = feature_extractor2(test_image, return_tensors="pt").pixel_values
  print("Pixel Values")
  print(pixel_values)
  # autoregressively generate text (using beam search or other decoding strategy)
  generated_ids = model2.generate(pixel_values, max_length=16, num_beams=4, return_dict_in_generate=True)
  
  # decode into text; generated_ids[0] is the `sequences` field of the generate() output
  preds = tokenizer.batch_decode(generated_ids[0], skip_special_tokens=True)
  preds = [pred.strip() for pred in preds]
  print("Predictions")
  print(preds)
  print("The preds type is : ",type(preds))
  pred_keys = ["Prediction"]
  pred_value = preds

  pred_dictionary = dict(zip(pred_keys, pred_value))
  print("Pred dictionary")
  print(pred_dictionary)
  #return(pred_dictionary)
  preds = ' '.join(preds)
  story = create_story(preds)
  story = ' '.join(story)
  return story


def classify_image(image):
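  """Run the Perceiver image-classification pipeline and return a {label: score} dict in the format Gradio's Label output expects."""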
  results = image_pipe(image)
  
  print("RESULTS")
  print(results)
  # convert to format Gradio expects
  output = {}
  for prediction in results:
    predicted_label = prediction['label']
    score = prediction['score']
    output[predicted_label] = score
  print("OUTPUT")
  print(output)
  return output


image = gr.inputs.Image(type="pil")
label = gr.outputs.Label(num_top_classes=5)
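# example image files (assumed to be shipped alongside this script)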
examples = [["cats.jpg"], ["batter.jpg"], ["drinkers.jpg"]]
title = "Generate a Story from an Image"
description = "Demo for classifying images with Perceiver IO. Upload an image and click 'Submit'; a story is also generated from the image."
article = "<p style='text-align: center'></p>"

img_info1 = gr.Interface(
    fn=classify_image,
    inputs=image,
    outputs=label,
)

img_info2 = gr.Interface(
    fn=self_caption,
    inputs=image,
    #outputs=label,
    outputs=[
        gr.outputs.Textbox(label='Story')
    ],
)

Parallel(img_info1,img_info2, inputs=image, title=title, description=description, examples=examples, enable_queue=True).launch(debug=True)
#Parallel(img_info1,img_info2, inputs=image, outputs=label, title=title, description=description, examples=examples, enable_queue=True).launch(debug=True)