File size: 2,667 Bytes
59cb97a
 
 
 
140ec83
 
 
 
396f7e1
 
59cb97a
 
 
 
 
 
 
 
 
140ec83
59cb97a
 
 
 
 
140ec83
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
59cb97a
 
 
140ec83
 
 
 
1a094b9
cdc0a73
140ec83
 
 
59cb97a
140ec83
59cb97a
 
 
140ec83
 
59cb97a
 
8b59fe1
59cb97a
 
 
 
140ec83
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
import os
import re

import cohere
import gradio as gr
import torch
from transformers import AutoTokenizer, ViTFeatureExtractor, VisionEncoderDecoderModel



 key_srkian = os.environ["key_srkian"]
 co = cohere.Client(key_srkian)#srkian
device='cpu'
encoder_checkpoint = "nlpconnect/vit-gpt2-image-captioning"
decoder_checkpoint = "nlpconnect/vit-gpt2-image-captioning"
model_checkpoint = "nlpconnect/vit-gpt2-image-captioning"
feature_extractor = ViTFeatureExtractor.from_pretrained(encoder_checkpoint)
tokenizer = AutoTokenizer.from_pretrained(decoder_checkpoint)
model = VisionEncoderDecoderModel.from_pretrained(model_checkpoint).to(device)


def predict(department, image, max_length=64, num_beams=4):
  """Caption *image* and turn the caption into a one-line meme for *department*.

  Parameters
  ----------
  department : str
      Department name selected in the UI (used in the few-shot prompt).
  image : PIL.Image.Image
      Uploaded image; converted to RGB before feature extraction.
  max_length : int
      Maximum token length of the generated caption.
  num_beams : int
      Beam-search width for caption generation.

  Returns
  -------
  str
      The meme text produced by the Cohere generation endpoint.
  """
  image = image.convert('RGB')
  image = feature_extractor(image, return_tensors="pt").pixel_values.to(device)
  # Strip the GPT-2 end-of-text marker and keep only the first line.
  clean_text = lambda x: x.replace('<|endoftext|>', '').split('\n')[0]
  # Fix: num_beams was previously accepted but never forwarded to generate().
  caption_ids = model.generate(image, max_length=max_length, num_beams=num_beams)[0]
  caption_text = clean_text(tokenizer.decode(caption_ids))
  dept = department
  context = caption_text
  # Few-shot prompt: two worked examples, then the caller's department/context.
  response = co.generate(
      model='large',
      prompt=f'create non offensive one line meme for given department and context\n\ndepartment- data science\ncontext-a man sitting on a bench with a laptop\nmeme- \"I\'m not a data scientist, but I play one on my laptop.\"\n\ndepartment-startup\ncontext-a young boy is smiling while using a laptop\nmeme-\"When your startup gets funded and you can finally afford a new laptop\"\n\ndepartment- {dept}\ncontext-{context}\nmeme-',
      max_tokens=20,
      temperature=0.8,
      k=0,
      p=0.75,
      frequency_penalty=0,
      presence_penalty=0,
      stop_sequences=["department"],
      return_likelihoods='NONE')
  meme_text = response.generations[0].text
  # The "department" stop sequence can leak into the output; scrub it.
  meme_text = meme_text.replace("department", "")
  return meme_text



# --- Gradio UI wiring -----------------------------------------------------
# Static copy shown around the interface.
title = "Meme world 🖼️"
description = "meme generation using advanced NLP "
article = "Created By :  Xaheen "

# Department choices offered in the dropdown.
dropdown = ["data science ", "product management", "marketing", "startup", "agile", "crypto", "SEO"]

# Single textbox that displays the generated meme.
output = gr.outputs.Textbox(type="auto", label="Meme")

interface = gr.Interface(
    fn=predict,
    inputs=[
        gr.inputs.Dropdown(dropdown),
        gr.inputs.Image(label="Upload your Image", type='pil', optional=True),
    ],
    theme="grass",
    outputs=output,
    title=title,
    description=description,
    article=article,
)
interface.launch(debug=True)

# c0here2022