seanbenhur committed
Commit bd6d465
1 Parent(s): 46d7b13

Update app.py

Files changed (1)
  1. app.py +28 -29
app.py CHANGED
@@ -1,45 +1,44 @@
- import torch
- import re
  import gradio as gr
- from pathlib import Path
- from transformers import GPT2Tokenizer, AutoFeatureExtractor, VisionEncoderDecoderModel
-
- def predict(image, max_length=64, num_beams=4):
-     image = image.convert('RGB')
-     pixel_values = feature_extractor(images=image, return_tensors="pt").pixel_values
-     pixel_values = pixel_values.to(device)
-     with torch.no_grad():
-         text = tokenizer.decode(model.generate(pixel_values.cpu())[0])
-     text = text.replace('<|endoftext|>', '').split('\n')
-     return text[0]
 
- model_path = "team-indain-image-caption/hindi-image-captioning"
- device = "cpu"
- # Load model.
- model = VisionEncoderDecoderModel.from_pretrained(model_path)
- model.to(device)
- print("Loaded model")
- feature_extractor = AutoFeatureExtractor.from_pretrained("google/vit-base-patch16-224-in21k")
- print("Loaded feature_extractor")
- tokenizer = GPT2Tokenizer.from_pretrained(model_path)
- print("Loaded tokenizer")
- title = "Hindi Image Captioning"
- description = ""
 
  input = gr.inputs.Image(label="Image to search", type='pil', optional=False)
  output = gr.outputs.Textbox(type="auto", label="Captions")
 
- article = "This HuggingFace Space presents a demo for Image captioning in Hindi built with VIT Encoder and GPT2 Decoder"
 
- examples = [f"./example_{i}.jpg" for i in range(1, 5)]
 
  interface = gr.Interface(
      fn=predict,
      inputs=input,
      theme="grass",
      outputs=output,
-     examples=examples,
      title=title,
      description=article,
  )
- interface.launch()
 
+ import torch
+ import re
  import gradio as gr
+ # ViTFeatureExtractor and AutoTokenizer are the classes actually used below.
+ from transformers import AutoTokenizer, ViTFeatureExtractor, VisionEncoderDecoderModel
+
+
+ # Checkpoints for the ViT encoder, the Hindi GPT-2 decoder, and the
+ # fine-tuned encoder-decoder captioning model.
+ encoder_checkpoint = 'google/vit-base-patch16-224'
+ decoder_checkpoint = 'surajp/gpt2-hindi'
+ model_checkpoint = 'team-indain-image-caption/hindi-image-captioning'
+ device = 'cpu'  # the Space runs inference on CPU
+ feature_extractor = ViTFeatureExtractor.from_pretrained(encoder_checkpoint)
+ tokenizer = AutoTokenizer.from_pretrained(decoder_checkpoint)
+ model = VisionEncoderDecoderModel.from_pretrained(model_checkpoint).to(device)
+
+
+ def predict(image, max_length=64, num_beams=4):
+     image = image.convert('RGB')
+     # Preprocess the PIL image into pixel values for the ViT encoder.
+     image = feature_extractor(image, return_tensors="pt").pixel_values.to(device)
+     # Drop the end-of-text token and keep only the first line of the caption.
+     clean_text = lambda x: x.replace('<|endoftext|>', '').split('\n')[0]
+     # Generate caption token ids from the preprocessed pixel values.
+     caption_ids = model.generate(image, max_length=max_length, num_beams=num_beams)[0]
+     print("*" * 20)
+     print(caption_ids)
+     caption_text = clean_text(tokenizer.decode(caption_ids))
+     return caption_text
+
 
  input = gr.inputs.Image(label="Image to search", type='pil', optional=False)
  output = gr.outputs.Textbox(type="auto", label="Captions")
 
+ title = "Hindi Image Captioning"  # referenced by gr.Interface below
+ article = "This HuggingFace Space presents a demo for Image captioning in Hindi built with VIT Encoder and GPT2 Decoder"
 
  interface = gr.Interface(
      fn=predict,
      inputs=input,
      theme="grass",
      outputs=output,
+     # examples=examples,
      title=title,
      description=article,
  )
+ interface.launch(debug=True)
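
For a quick sanity check outside the Gradio UI, the same ViT-encoder/GPT-2-decoder inference path can be exercised directly. A minimal sketch, assuming the three checkpoints above are downloadable and a local test image exists (test.jpg is a hypothetical file name):

from PIL import Image
from transformers import AutoTokenizer, ViTFeatureExtractor, VisionEncoderDecoderModel

# Same checkpoints as in app.py above.
feature_extractor = ViTFeatureExtractor.from_pretrained('google/vit-base-patch16-224')
tokenizer = AutoTokenizer.from_pretrained('surajp/gpt2-hindi')
model = VisionEncoderDecoderModel.from_pretrained('team-indain-image-caption/hindi-image-captioning')

image = Image.open('test.jpg').convert('RGB')  # hypothetical local test image
pixel_values = feature_extractor(image, return_tensors='pt').pixel_values
caption_ids = model.generate(pixel_values, max_length=64, num_beams=4)[0]
print(tokenizer.decode(caption_ids, skip_special_tokens=True))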