rsinghpatawat commited on
Commit
f99ed4e
β€’
1 Parent(s): 01a34b3

push changes

Browse files
Files changed (4) hide show
  1. .gitattributes +1 -8
  2. README.md +6 -6
  3. app.py +182 -0
  4. requirements.txt +3 -0
.gitattributes CHANGED
@@ -2,27 +2,20 @@
2
  *.arrow filter=lfs diff=lfs merge=lfs -text
3
  *.bin filter=lfs diff=lfs merge=lfs -text
4
  *.bz2 filter=lfs diff=lfs merge=lfs -text
5
- *.ckpt filter=lfs diff=lfs merge=lfs -text
6
  *.ftz filter=lfs diff=lfs merge=lfs -text
7
  *.gz filter=lfs diff=lfs merge=lfs -text
8
  *.h5 filter=lfs diff=lfs merge=lfs -text
9
  *.joblib filter=lfs diff=lfs merge=lfs -text
10
  *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
  *.model filter=lfs diff=lfs merge=lfs -text
13
  *.msgpack filter=lfs diff=lfs merge=lfs -text
14
- *.npy filter=lfs diff=lfs merge=lfs -text
15
- *.npz filter=lfs diff=lfs merge=lfs -text
16
  *.onnx filter=lfs diff=lfs merge=lfs -text
17
  *.ot filter=lfs diff=lfs merge=lfs -text
18
  *.parquet filter=lfs diff=lfs merge=lfs -text
19
  *.pb filter=lfs diff=lfs merge=lfs -text
20
- *.pickle filter=lfs diff=lfs merge=lfs -text
21
- *.pkl filter=lfs diff=lfs merge=lfs -text
22
  *.pt filter=lfs diff=lfs merge=lfs -text
23
  *.pth filter=lfs diff=lfs merge=lfs -text
24
  *.rar filter=lfs diff=lfs merge=lfs -text
25
- *.safetensors filter=lfs diff=lfs merge=lfs -text
26
  saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
  *.tar.* filter=lfs diff=lfs merge=lfs -text
28
  *.tflite filter=lfs diff=lfs merge=lfs -text
@@ -30,5 +23,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
30
  *.wasm filter=lfs diff=lfs merge=lfs -text
31
  *.xz filter=lfs diff=lfs merge=lfs -text
32
  *.zip filter=lfs diff=lfs merge=lfs -text
33
- *.zst filter=lfs diff=lfs merge=lfs -text
34
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
2
  *.arrow filter=lfs diff=lfs merge=lfs -text
3
  *.bin filter=lfs diff=lfs merge=lfs -text
4
  *.bz2 filter=lfs diff=lfs merge=lfs -text
 
5
  *.ftz filter=lfs diff=lfs merge=lfs -text
6
  *.gz filter=lfs diff=lfs merge=lfs -text
7
  *.h5 filter=lfs diff=lfs merge=lfs -text
8
  *.joblib filter=lfs diff=lfs merge=lfs -text
9
  *.lfs.* filter=lfs diff=lfs merge=lfs -text
 
10
  *.model filter=lfs diff=lfs merge=lfs -text
11
  *.msgpack filter=lfs diff=lfs merge=lfs -text
 
 
12
  *.onnx filter=lfs diff=lfs merge=lfs -text
13
  *.ot filter=lfs diff=lfs merge=lfs -text
14
  *.parquet filter=lfs diff=lfs merge=lfs -text
15
  *.pb filter=lfs diff=lfs merge=lfs -text
 
 
16
  *.pt filter=lfs diff=lfs merge=lfs -text
17
  *.pth filter=lfs diff=lfs merge=lfs -text
18
  *.rar filter=lfs diff=lfs merge=lfs -text
 
19
  saved_model/**/* filter=lfs diff=lfs merge=lfs -text
20
  *.tar.* filter=lfs diff=lfs merge=lfs -text
21
  *.tflite filter=lfs diff=lfs merge=lfs -text
 
23
  *.wasm filter=lfs diff=lfs merge=lfs -text
24
  *.xz filter=lfs diff=lfs merge=lfs -text
25
  *.zip filter=lfs diff=lfs merge=lfs -text
26
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
27
  *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,12 +1,12 @@
1
  ---
2
- title: LM Meets HF
3
- emoji: πŸ’©
4
- colorFrom: green
5
- colorTo: red
6
  sdk: gradio
7
- sdk_version: 3.19.1
8
  app_file: app.py
9
  pinned: false
10
  ---
11
 
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
  ---
2
+ title: Language Model Meets Hugging Face
3
+ emoji: πŸ“–πŸ€πŸ€—
4
+ colorFrom: gray
5
+ colorTo: blue
6
  sdk: gradio
7
+ sdk_version: 3.0.10
8
  app_file: app.py
9
  pinned: false
10
  ---
11
 
12
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,182 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import gradio as gr

import openai
import re
import random
import os

# OpenAI key comes from the Space's secret store; a missing secret raises
# KeyError at import time so the Space fails fast instead of at first request.
openai.api_key = os.environ["OAI_key"]

from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

# DialoGPT powers the default conversational replies (downloaded at startup).
dialog_tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-large")
dialog_model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-large")

# Latent Diffusion — remote Space used for text-to-image generation.
latent_diff_interface = gr.Interface.load("spaces/multimodalart/latentdiffusion", api_key="secret", enable_queue=True)

# Background Remover — remote Space used to strip the generated image's background.
background_remove_interface = gr.Interface.load("spaces/nateraw/background-remover", api_key="secret", enable_queue=True)
21
+
22
def is_generate_image(input):
    """Classify whether the user's message asks to generate an image.

    Uses a few-shot GPT-3 completion as a yes/no classifier.

    Parameters
    ----------
    input : str
        The raw user message.

    Returns
    -------
    str
        The model's answer with all non-letter characters stripped —
        normally the STRING "True" or "False", never a bool.  Callers
        compare against ``str(True)``.
    """
    response = openai.Completion.create(
        engine="text-davinci-002",
        prompt="Prompt: Generate an image of a lion.\nQuestion: Generate an image? True or False?\nAnswer:True.\n\nPrompt: Remove background of the image.\nQuestion: Generate an image? True or False?\nAnswer:False.\n\nPrompt:" + input + "\nQuestion: Generate an image? True or False\nAnswer:",
        temperature=0,  # deterministic: this is a classification, not creative text
        max_tokens=8,   # only a single True/False word is expected
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0
    )

    # Keep letters only so punctuation/whitespace around the answer cannot
    # break the string comparison done by the caller.
    response = re.sub(r'[^A-Za-z]', '', response["choices"][-1]["text"])

    return response
38
+
39
def generate_image(input):
    """Generate an image with latent diffusion from the user's message.

    First asks GPT-3 to extract the bare image description from the
    user's free-form message, then forwards that description to the
    latent-diffusion Space.

    Parameters
    ----------
    input : str
        The raw user message (e.g. "Please draw me a lion").

    Returns
    -------
    tuple
        ``(image, confirmation_text)`` where *image* is the first output
        of the latent-diffusion Space and *confirmation_text* echoes the
        extracted prompt back to the user.
    """
    # Ask GPT-3 what image the user actually wants.
    input_prompt = openai.Completion.create(
        engine="text-davinci-002",
        prompt="Input: Generate an image of a \"lion\"\nQuestion: What is the prompt for image generation?\nAnswer: lion\n\nInput: I am feeling good. Please send me an image of the sky with mountains by Greg Rutkowski.\nQuestion: What is the prompt for image generation?\nAnswer: sky with mountains by Greg Rutkowski\n\nInput: " + input + "\nQuestion: What is the prompt for image generation?\nAnswer:",
        temperature=0.7,
        max_tokens=256,
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0
    )

    # Strip the leading/trailing whitespace the completion API tends to
    # include, so it doesn't leak into the diffusion prompt or the
    # user-visible confirmation text.
    text_prompt_for_image = input_prompt["choices"][-1]["text"].strip()
    # Positional args of the remote Space — presumably steps, width,
    # height, number of images, guidance scale; TODO confirm against the
    # multimodalart/latentdiffusion Space signature.
    image_output = latent_diff_interface(text_prompt_for_image, 45, 256, 256, 1, 5)
    text_response_for_image = "Generated image for: " + text_prompt_for_image

    return image_output[0], text_response_for_image
59
+
60
def remove_background(input, generated_image=None, generate_image_flag="False"):
    """Remove the background of a previously generated image on request.

    Uses a few-shot GPT-3 completion to decide whether the user asked
    for background removal, and if so forwards the image to the
    background-remover Space.

    Parameters
    ----------
    input : str
        The raw user message.
    generated_image : optional
        The previously generated image, if any.
    generate_image_flag : str
        Currently unused; kept so existing callers that pass the
        image-generation flag keep working.

    Returns
    -------
    The background-removed image, or None when the user did not ask for
    removal or there is no image to process.
    """
    # Check if user is asking for removing background.
    response = openai.Completion.create(
        engine="text-davinci-002",
        prompt="Prompt: remove background.\nQuestion: Remove the background? True or False?\nAnswer: True\n\nPrompt: Remove the background of image.\nQuestion: Remove the background? True or False?\nAnswer: True\n\nPrompt: " + input + "\nQuestion: Remove the background? True or False?\nAnswer:",
        temperature=0,  # deterministic: yes/no classification
        max_tokens=8,   # only a single True/False word is expected
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0
    )

    # Normalize the answer to letters only before comparing.
    response = re.sub(r'[^A-Za-z]', '', response["choices"][-1]["text"])

    # Only proceed when the classifier said "True" AND there is an image.
    if response == str(True) and generated_image is not None:
        # Second positional arg is presumably a quality/threshold knob of
        # the remote Space — TODO confirm against nateraw/background-remover.
        rb_image_output = background_remove_interface(generated_image, 100)
    else:
        rb_image_output = None

    return rb_image_output
81
+
82
def random_HF_space(input):
    """Return a link to a random HF Space when the user sounds bored.

    Uses a few-shot GPT-3 completion to decide whether the user
    expressed boredom.

    Parameters
    ----------
    input : str
        The raw user message.

    Returns
    -------
    str or None
        An HTML anchor to a randomly chosen Space, or None when the
        user is not bored.
    """
    response = openai.Completion.create(
        engine="text-davinci-002",
        prompt="User: I am feeling bored.\nQuestion: Is the user bored? True or False?\nAnswer: True\n\nUser: " + input + "\nQuestion: Is the user bored? True or False?\nAnswer:",
        temperature=0,  # deterministic: yes/no classification
        max_tokens=8,   # only a single True/False word is expected
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0
    )

    spaces_list = ["<a href='https://huggingface.co/spaces/akhaliq/AnimeGANv2' target='_blank'> Click here for a surprise space </a>",
                   "<a href='https://huggingface.co/spaces/dalle-mini/dalle-mini' target='_blank'> Click here for a surprise space </a>",
                   "<a href='https://huggingface.co/spaces/lfqa/lfqa' target='_blank'> Click here for a surprise space </a>",
                   "<a href='https://huggingface.co/spaces/ronvolutional/ai-pokemon-card' target='_blank'> Click here for a surprise space </a>",
                   "<a href='https://huggingface.co/spaces/flax-community/chef-transformer' target='_blank'> Click here for a surprise space </a>",
                   "<a href='https://huggingface.co/spaces/osanseviero/draw_to_search' target='_blank'> Click here for a surprise space </a>"]

    # Normalize the answer to letters only before comparing.
    response = re.sub(r'[^A-Za-z]', '', response["choices"][-1]["text"])

    if response == str(True):
        # random.choice avoids the hard-coded upper index of randint(0, 5),
        # which would silently drop entries if the list grows.
        response = random.choice(spaces_list)
    else:
        response = None

    return response
109
+
110
+
111
def DialogGPT(input, history=None, text_response_for_image=None):
    """Run one chat turn through DialoGPT, or inject a canned bot reply.

    Parameters
    ----------
    input : str
        The user's message for this turn.
    history : list or None
        Token-id history from the previous turn (``None`` starts a
        fresh conversation; avoids the shared-mutable-default pitfall).
    text_response_for_image : str or None
        When given (e.g. "Generated image for: ..."), this text is used
        as the bot's reply instead of sampling from the model.

    Returns
    -------
    tuple
        ``(response, history)`` — *response* is a list of
        ``(user_text, bot_text)`` pairs for the gradio chatbot widget,
        *history* the updated token-id history.
    """
    history = [] if history is None else history  # never mutate a shared default

    new_user_input_ids = dialog_tokenizer.encode(input + dialog_tokenizer.eos_token, return_tensors='pt')
    bot_input_ids = torch.cat([torch.LongTensor(history), new_user_input_ids], dim=-1)

    if text_response_for_image is not None:
        # Append the canned reply verbatim instead of generating one.
        text_response_for_image = dialog_tokenizer.encode(text_response_for_image + dialog_tokenizer.eos_token, return_tensors='pt')
        history = torch.cat([bot_input_ids, text_response_for_image], dim=-1).tolist()

    else:
        history = dialog_model.generate(bot_input_ids, max_length=1000, pad_token_id=dialog_tokenizer.eos_token_id).tolist()

    # Split the decoded token stream on the EOS marker and pair up
    # alternating (user, bot) turns for the chatbot display.
    response = dialog_tokenizer.decode(history[0]).split("<|endoftext|>")
    response = [(response[i], response[i+1]) for i in range(0, len(response)-1, 2)]
    return response, history
126
+
127
+
128
def predict(input, history=None):
    """Route the user's message to the right capability and reply.

    Dispatch order: background removal (only possible when the previous
    turn generated an image) → image generation → "bored" Space
    suggestion → plain DialoGPT chat.

    Parameters
    ----------
    input : str
        The user's message.
    history : list or None
        Chat state from the previous turn.  After an image-generation
        turn it is ``[token_ids, [flag], [image]]`` (see the appends
        below); otherwise just ``[token_ids]``.

    Returns
    -------
    tuple
        ``(generated_text, history, rs_output, generated_image)`` —
        chatbot pairs, updated state, optional HTML Space link,
        optional image.
    """
    # A fresh list per call: the old `history=[]` default was mutated via
    # append() below and would leak state across independent sessions.
    # Also tolerates gradio passing None as the initial state.
    history = [] if history is None else history

    rs_output = None
    generated_image = None
    generated_text = None

    if len(history) > 1:
        # The previous turn generated an image; unpack the extra state
        # appended below and restore the plain token history.
        previously_generated_image = history[-1][0]
        previously_generated_image_flag = history[-2][0]
        history = [history[-3]]

        # Check if user is asking for removing background
        generated_image = remove_background(input, previously_generated_image, previously_generated_image_flag)

    if generated_image is not None:
        # Output as per removed background
        text_response_for_image = "Background removed."
        generated_text, history = DialogGPT(input, history, text_response_for_image)

    else:

        # Check if user is asking for generating image
        generate_image_flag = is_generate_image(input)

        if generate_image_flag == str(True):
            # Generate Image
            generated_image, text_response_for_image = generate_image(input)
            generated_text, history = DialogGPT(input, history, text_response_for_image)

            # Append history with generated image and generated image flag
            # (consumed by the len(history) > 1 branch on the next turn).
            history.append([generate_image_flag])
            history.append([generated_image])

        else:
            # Check if user is bored
            rs_output = random_HF_space(input)
            if rs_output is not None:
                # Output as per random space
                rs_text_input = "Try out the below space"
                generated_text, history = DialogGPT(input, history, rs_text_input)
            else:
                # Dialog GPT output
                generated_text, history = DialogGPT(input, history)

    return generated_text, history, rs_output, generated_image
173
+
174
# Demo inputs shown under the interface — one per supported capability.
examples = [['Who is Elon Musk?'], ['Generate an image of horse running in grassland.'], ['Remove the background of the image.'], ['I am bored.']]
# Fixed "Langauge" -> "Language" typo in the user-facing description.
description = "Combines the sanity of Language models with HF spaces to create a unique experience. The chatbot is capable of understanding the user input for image generation, image background removal & a random space link if you are bored. For other inputs, the space uses DialogGPT-large."
article = "![visitor badge](https://visitor-badge.glitch.me/badge?page_id=gradio-blocks_LM_meets_HF) \nHead on to <a href='https://huggingface.co/spaces/Gradio-Blocks/LM_meets_HF/discussions?type=discussion' target='_blank'>discussions</a> if you have any feedback."


# Outputs mirror predict()'s return tuple:
# chatbot pairs, state, optional HTML Space link, optional image.
gr.Interface(fn=predict,
             inputs=["text", "state"],
             outputs=["chatbot", "state", gr.outputs.HTML(), gr.outputs.Image()], title="Language Model Meets Hugging Face",
             examples=examples, description=description, article=article).launch(debug=True, enable_queue=True)
requirements.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ torch
2
+ transformers
3
+ openai