kusumakar committed on
Commit
b5e959f
1 Parent(s): 87e14b3

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +21 -15
app.py CHANGED
@@ -1,3 +1,8 @@
 
 
 
 
 
1
  import numpy as np
2
  from PIL import Image
3
  import streamlit as st
@@ -8,11 +13,6 @@ model = VisionEncoderDecoderModel.from_pretrained("nlpconnect/vit-gpt2-image-cap
8
  feature_extractor = ViTFeatureExtractor.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
9
  tokenizer = AutoTokenizer.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
10
 
11
- # Load the pre-trained model and tokenizer
12
- model_name = "gpt2"
13
- tokenizer_1 = GPT2Tokenizer.from_pretrained(model_name)
14
- model_2 = GPT2LMHeadModel.from_pretrained(model_name)
15
-
16
  def generate_captions(image):
17
  image = Image.open(image).convert("RGB")
18
  generated_caption = tokenizer.decode(model.generate(feature_extractor(image, return_tensors="pt").pixel_values.to("cpu"))[0])
@@ -21,21 +21,27 @@ def generate_captions(image):
21
  generated_caption = sentence.replace(text_to_remove, "")
22
  return generated_caption
23
 
24
- # Define the Streamlit app
25
- def generate_paragraph(prompt):
26
- # Tokenize the prompt
27
- input_ids = tokenizer_1.encode(prompt, return_tensors="pt")
28
 
29
- # Generate the paragraph
30
- output = model_2.generate(input_ids, max_length=200, num_return_sequences=1, no_repeat_ngram_size=2, early_stopping=True)
 
 
 
 
 
 
 
31
 
32
- # Decode the generated output into text
33
- paragraph = tokenizer_1.decode(output[0], skip_special_tokens=True)
34
- return paragraph
35
 
 
 
36
  # create the Streamlit app
37
  def app():
38
- st.title('Image from your Side, Trending Hashtags from our Side')
39
 
40
  st.write('Upload an image to see what we have in store.')
41
 
 
1
+ import os
2
+ import openai
3
+ api_key = os.environ.get('OPENAI_API_KEY')
4
+ openai.api_key = api_key
5
+
6
  import numpy as np
7
  from PIL import Image
8
  import streamlit as st
 
13
  feature_extractor = ViTFeatureExtractor.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
14
  tokenizer = AutoTokenizer.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
15
 
 
 
 
 
 
16
  def generate_captions(image):
17
  image = Image.open(image).convert("RGB")
18
  generated_caption = tokenizer.decode(model.generate(feature_extractor(image, return_tensors="pt").pixel_values.to("cpu"))[0])
 
21
  generated_caption = sentence.replace(text_to_remove, "")
22
  return generated_caption
23
 
24
+ def generate_paragraph(caption):
25
+ prompt = "Generate a paragraph based on the following caption: " + caption
 
 
26
 
27
+ # Make the API call to GPT-3
28
+ response = openai.Completion.create(
29
+ engine='text-davinci-003', # Specify the GPT-3 model
30
+ prompt=prompt,
31
+ max_tokens=200, # Adjust the desired length of the generated text
32
+ n = 1, # Set the number of completions to generate
33
+ stop=None, # Specify an optional stop sequence
34
+ temperature=0.7 # Adjust the temperature for randomness (between 0 and 1)
35
+ )
36
 
37
+ # Extract the generated text from the API response
38
+ generated_text = response.choices[0].text.strip()
 
39
 
40
+ return generated_text
41
+
42
  # create the Streamlit app
43
  def app():
44
+ st.title('Image from your Side, Detailed description from my site')
45
 
46
  st.write('Upload an image to see what we have in store.')
47