autonomous019 committed
Commit: 7e72612
Parent: 04c6deb

Update app.py

Files changed (1)
  1. app.py +5 -46
app.py CHANGED
@@ -84,42 +84,7 @@ def inference(input_sentence, max_length, sample_or_greedy, seed=42):
         "",
     )
     '''
-    return generation
-
-
-
-
-
-def create_story(text_seed):
-    #tokenizer = AutoTokenizer.from_pretrained("gpt2")
-    #model = AutoModelForCausalLM.from_pretrained("gpt2")
-
-    # EleutherAI GPT-Neo (GPT-3-style architecture)
-    tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neo-125M")
-    model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-neo-125M")
-
-    # set pad_token_id to eos_token_id because GPT2 does not have a PAD token
-    model.config.pad_token_id = model.config.eos_token_id
-
-    #input_prompt = "It might be possible to"
-    input_prompt = text_seed
-    input_ids = tokenizer(input_prompt, return_tensors="pt").input_ids
-
-    # instantiate logits processors
-    logits_processor = LogitsProcessorList(
-        [
-            MinLengthLogitsProcessor(10, eos_token_id=model.config.eos_token_id),
-        ]
-    )
-    stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=100)])
-
-    outputs = model.greedy_search(
-        input_ids, logits_processor=logits_processor, stopping_criteria=stopping_criteria
-    )
-
-    result_text = tokenizer.batch_decode(outputs, skip_special_tokens=True)
-    return result_text
-
+    return input_sentence + generation
 
 
 
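For readers following the removed helper: greedy_search is the low-level decoding loop that Transformers' generate() wraps. A minimal sketch of the same greedy decoding through the high-level API, assuming only that transformers and torch are installed; this is not part of the commit itself:

from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neo-125M")
model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-neo-125M")
model.config.pad_token_id = model.config.eos_token_id  # GPT-Neo ships without a PAD token

input_ids = tokenizer("It might be possible to", return_tensors="pt").input_ids
outputs = model.generate(
    input_ids,
    do_sample=False,  # greedy decoding, matching greedy_search above
    min_length=10,    # mirrors MinLengthLogitsProcessor(10, ...)
    max_length=100,   # mirrors MaxLengthCriteria(max_length=100)
)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0])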
@@ -127,12 +92,7 @@ def create_story(text_seed):
 
 def self_caption(image):
     repo_name = "ydshieh/vit-gpt2-coco-en"
-    #test_image = "cats.jpg"
     test_image = image
-    #url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
-    #test_image = Image.open(requests.get(url, stream=True).raw)
-    #test_image.save("cats.png")
-
     feature_extractor2 = ViTFeatureExtractor.from_pretrained(repo_name)
     tokenizer = AutoTokenizer.from_pretrained(repo_name)
     model2 = VisionEncoderDecoderModel.from_pretrained(repo_name)
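The three from_pretrained loads above follow the standard VisionEncoderDecoderModel captioning flow. A minimal sketch of how they fit together, assuming a local batter.jpg (the example image referenced later in the file); this mirrors the ydshieh/vit-gpt2-coco-en model card rather than quoting the Space's exact code:

from PIL import Image
from transformers import AutoTokenizer, ViTFeatureExtractor, VisionEncoderDecoderModel

repo_name = "ydshieh/vit-gpt2-coco-en"
feature_extractor = ViTFeatureExtractor.from_pretrained(repo_name)
tokenizer = AutoTokenizer.from_pretrained(repo_name)
model = VisionEncoderDecoderModel.from_pretrained(repo_name)

image = Image.open("batter.jpg").convert("RGB")
pixel_values = feature_extractor(images=[image], return_tensors="pt").pixel_values
output_ids = model.generate(pixel_values, max_length=16, num_beams=4)
preds = [p.strip() for p in tokenizer.batch_decode(output_ids, skip_special_tokens=True)]
print(preds)  # e.g. ["a baseball player swinging a bat at a ball"]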
@@ -154,12 +114,11 @@ def self_caption(image):
     pred_dictionary = dict(zip(pred_keys, pred_value))
     print("Pred dictionary")
     print(pred_dictionary)
-    #return(pred_dictionary)
+
     preds = ' '.join(preds)
     #inference(input_sentence, max_length, sample_or_greedy, seed=42)
     story = inference(preds, 32, "Sample", 42)
-    #story = create_story(preds)
-    #story = ' '.join(story)
+
     return story
 
 
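Downstream, the joined caption becomes the prompt for the BLOOM-backed inference function whose signature appears in the first hunk. A hedged usage sketch; the caption string here is illustrative, not actual model output:

caption = "a baseball player swinging a bat at a ball"  # illustrative captioner output
# inference(input_sentence, max_length, sample_or_greedy, seed)
story = inference(caption, 32, "Sample", 42)
print(story)  # after this commit, the prompt is echoed ahead of the generated text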
@@ -200,7 +159,7 @@ label = gr.outputs.Label(num_top_classes=5)
 #examples = [ ["cats.jpg"], ["batter.jpg"],["drinkers.jpg"] ]
 examples = [ ["batter.jpg"] ]
 title = "Generate a Story from an Image"
-description = "Demo for classifying images with Perceiver IO. To use it, simply upload an image and click 'submit', a story is autogenerated as well"
+description = "Demo for classifying images with Perceiver IO. To use it, simply upload an image and click 'submit', a story is autogenerated as well, story generated using Bigscience/BLOOM"
 article = "<p style='text-align: center'></p>"
 
 img_info1 = gr.Interface(
@@ -219,5 +178,5 @@ img_info2 = gr.Interface(
     )
 
 Parallel(img_info1,img_info2, inputs=image, title=title, description=description, examples=examples, enable_queue=True).launch(debug=True)
-#Parallel(img_info1,img_info2, inputs=image, outputs=label, title=title, description=description, examples=examples, enable_queue=True).launch(debug=True)
+
 
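Parallel here comes from Gradio's mix utilities: it fans a single input out to several interfaces and renders their outputs side by side. A minimal sketch of the pattern with hypothetical stand-in functions, assuming a Gradio 3.x install (gradio.mix was removed in 4.x):

import gradio as gr
from gradio.mix import Parallel  # Gradio 3.x; removed in 4.x

def classify(image):   # hypothetical stand-in for the Perceiver IO classifier
    return "a classification label"

def tell_story(image): # hypothetical stand-in for self_caption + BLOOM
    return "an autogenerated story"

iface1 = gr.Interface(fn=classify, inputs=gr.Image(type="pil"), outputs="text")
iface2 = gr.Interface(fn=tell_story, inputs=gr.Image(type="pil"), outputs="text")
Parallel(iface1, iface2, title="Generate a Story from an Image").launch()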