Yoann committed
Commit: 57a6212
1 Parent(s): 1b4c7f5

Initial commit

.Rhistory ADDED
File without changes
README.md CHANGED
@@ -1,12 +1,12 @@
  ---
- title: CLIPictionary
- emoji: 🐨
- colorFrom: gray
- colorTo: pink
+ title: CLIPictionary!
+ emoji: ✏️
+ colorFrom: yellow
+ colorTo: yellow
  sdk: gradio
- sdk_version: 3.0.17
+ sdk_version: 3.0.12
  app_file: app.py
- pinned: false
+ pinned: true
  ---

  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
__pycache__/game.cpython-39.pyc ADDED
Binary file (3.94 kB)

__pycache__/model.cpython-39.pyc ADDED
Binary file (406 Bytes)

__pycache__/process.cpython-39.pyc ADDED
Binary file (3.59 kB)

__pycache__/sentence.cpython-39.pyc ADDED
Binary file (3.57 kB)

__pycache__/words.cpython-39.pyc ADDED
Binary file (1.47 kB)
app.py ADDED
@@ -0,0 +1,43 @@
+ import matplotlib.pyplot as plt
+ import torch, torchvision, clip, random
+ import math, time, os
+ from PIL import Image
+ import gradio as gr
+ import numpy as np
+
+ from sentence import *
+ from game import *
+
+ ##### Initialize new game
+ title, _, _, var_dict = new_game(first_game=True)
+ var_dict["start_time"] = -1
+
+ ##### Display & Events
+ demo = gr.Blocks()
+ with demo:
+     ### All game variables are stored here
+     variables = gr.Variable(var_dict)
+     ### Target Sentence
+     title = gr.HTML(title)
+     ### Canvas & Prediction
+     with gr.Column():
+         with gr.Row():
+             image_input = gr.Image(image_mode='L', label="", show_label=False, source='canvas', shape=None, streaming=False, invert_colors=False, tool="editor")
+             with gr.Column():
+                 html_pred = gr.HTML(value=getHTML(var_dict, ""))
+                 html_loading = gr.HTML("")
+         ### 'New Sentence' Button
+         button = gr.Button("New Sentence", variant="primary")
+     ### Information
+     gr.HTML("<div style=\"display:block; height:30px;\"> </div>")
+     with gr.Row():
+         gr.HTML("<div style=\"display:block; position:relative; bottom:10%; border-top: 1px solid grey; padding:10px; \"><span style=\"font-size:30px;\">✏️</span><span style=\"font-size:40px; font-weight:bold;\">CLIPictionary</span><br>Made by <a href=\"https://yoann-lemesle.notion.site/Yoann-Lemesle-63b8120764284794b275d2967be710da\" style=\"text-decoration: underline;\">Yoann Lemesle</a> using OpenAI's <a href=\"https://github.com/openai/CLIP\" style=\"text-decoration: underline;\">CLIP model</a>.</div>")
+
+
+     ### Events
+     button.click(loading, inputs=html_loading, outputs=[title, html_pred, html_loading])  # Button -> triggers Loading
+     html_loading.change(new_game, inputs=[html_loading], outputs=[title, html_pred, image_input, variables])  # Loading -> triggers New game
+     image_input.change(process_img, inputs=[variables, image_input, title], outputs=[html_pred, title, variables])
+
+
+ demo.launch(share=False)
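
Note on the event wiring above: the "New Sentence" button never starts a round directly. It flips a hidden HTML value, and that component's `.change` event is what calls `new_game`, while every canvas update calls `process_img`. A minimal standalone sketch of the button → hidden flag → `.change` chaining pattern (Gradio 3.x Blocks API as pinned in the README; the toy callbacks are hypothetical and only illustrate the wiring):

```python
import gradio as gr

def toggle(flag):
    # Flip the hidden value so the .change event fires on every click.
    return "<div style='display:none;'>1</div>" if ">0<" in flag else "<div style='display:none;'>0</div>"

def on_change(flag):
    # Stand-in for new_game(): runs whenever the hidden flag changes.
    return f"New round started (flag = {flag})"

with gr.Blocks() as sketch:
    flag = gr.HTML("<div style='display:none;'>0</div>")
    status = gr.HTML("")
    button = gr.Button("New Sentence")
    button.click(toggle, inputs=flag, outputs=flag)        # step 1: click toggles the hidden flag
    flag.change(on_change, inputs=flag, outputs=status)    # step 2: the flag's .change runs the handler

if __name__ == "__main__":
    sketch.launch()
```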
game.py ADDED
@@ -0,0 +1,127 @@
+ import torch, torchvision, clip, time, math
+ import matplotlib.pyplot as plt
+ import numpy as np
+ from model import encoder_image
+ from sentence import *
+
+ ##### Get infos & cool facts to display during loadings
+ with open("infos.txt") as file:
+     infos = file.readlines()
+
+ ##### Get css
+ with open("style.css") as style:
+     css = "<style>" + ''.join(style.readlines()) + "</style>"
+
+ ##### 'LOADING' EVENT
+ def loading(html_loading=None):
+     ### Make sure the content changes, which triggers the .change event that will itself launch a new game
+     if html_loading == "<div style=\"display:none;\">0</div>": new_value = "<div style=\"display:none;\">1</div>"
+     else: new_value = "<div style=\"display:none;\">0</div>"
+     ### Get a random tip
+     info = np.random.choice(infos)
+     ### Return TITLE, TIP TEXT, NEW HTML CONTENT
+     return "<h1 id=\"loading\">⌛Loading...</h1>", css+"<div id=\"prediction\"><p id=\"infos\">"+info+"</p></div>", new_value
+
+ ##### 'NEW GAME' EVENT
+ def new_game(img=None, first_game=False):
+     print("\n----------Launching new game!")
+     var_dict = {
+         "start_time": time.time(),
+         "total_time": 0,
+         "found_words": [],
+         "target_sentence": "",
+         "guessed_sentence": "",
+         "parts": [],
+         "win": 0,
+         "step": 0,
+         "prev_steps": [],
+         "prev_norm": float("inf"),
+         "tip": "",
+         "loading": False,
+         "revertedState": False
+     }
+     target = iniSentence(var_dict, first_game=first_game)
+     ### Return TITLE, PREDICTION TEXT, CANVAS IMG, VAR DICT
+     return "<h1>"+target+"</h1>", getHTML(var_dict, ""), None, var_dict
+
+ ##### PREDICTION TEXT HTML
+ def getHTML(var_dict, text, win=0):
+     ### Which parts of the sentence have been guessed?
+     guessed, not_guessed = "", ""
+     text_words = text.split(" ")
+     target_words = var_dict["target_sentence"].split(" ")
+     for i, word in enumerate(text_words):
+         if i < len(target_words) and word == target_words[i]: guessed += word + " "
+         else: not_guessed += word + " "
+     ### Display prediction
+     if win != 1:
+         html = "<p><span>"+guessed+"</span>"+not_guessed+"</p>"
+     else:
+         minutes, seconds = math.floor(var_dict["total_time"]/60), var_dict["total_time"]%60
+         if minutes < 1 and seconds <= 30: emoji = "🏆😍"
+         elif minutes < 1: emoji = "😄"
+         elif minutes < 2: emoji = "😐"
+         elif minutes < 3: emoji = "😓"
+         else: emoji = "😱"
+         time_str = "Total time: " + ((str(minutes)+"m") if minutes > 0 else "") + str(seconds)+"s "+emoji
+         html = "<p id=\"win\"><span>"+guessed+"</span><br>"+time_str+"</p>"
+     return css+"<div id=\"prediction\">"+html+"</div>"
+
+ ##### DRAWING PROCESSING & GAME STATE UPDATE
+ def process_img(var_dict, img, title):
+     # Make sure that start_time is updated for the first game
+     if var_dict["start_time"] == -1:
+         var_dict["start_time"] = time.time()
+     if img is None:
+         return getHTML(var_dict, "", win=0), "<h1>"+var_dict["target_sentence"]+"</h1>", var_dict
+     elif (img is not None) and (var_dict["win"] != 1):
+         print("-----Processing...")
+         part = var_dict["parts"][var_dict["step"]]
+         image = torch.tensor(img).float() / 255
+
+         ### Detect Cancel event
+         norm = torch.norm(image)
+         if norm > var_dict["prev_norm"]:
+             print("---Cancel Event")
+             prevState(var_dict)
+         var_dict["prev_norm"] = norm
+
+         ### Image preprocessing --> shape (224,224)
+         max_edge = max(image.shape[0], image.shape[1])
+         min_edge = min(image.shape[0], image.shape[1])
+         square_image = torch.ones(max_edge, max_edge)
+         pad = math.floor((max_edge - min_edge)/2)
+         if max_edge == image.shape[1]: square_image[pad:pad+min_edge, :] = image
+         else: square_image[:, pad:pad+min_edge] = image
+         image = torchvision.transforms.Resize((224,224))(square_image.unsqueeze(0)).repeat(1,3,1,1)
+
+         ### Computing cosine similarities (drawing <-> text embeddings)
+         with torch.no_grad():
+             image_features = encoder_image(image)[0]
+             text_features = torch.tensor(part["embeddings"])
+             image_features /= image_features.norm()
+             similarities = torch.matmul(text_features, image_features)
+             probs = torch.nn.Softmax(dim=-1)(similarities)
+
+         ### Sort indexes by similarity
+         idxs = np.argsort(similarities)
+
+         ### Use top-3 predictions
+         top3_idxs = idxs[-3:]
+         classes = part["classes"]
+         preds = [classes[idx] for idx in top3_idxs]
+         print(f"Top-3 Predictions: {preds}")
+         print(f"Top-3 Probabilities: {probs[top3_idxs]}")
+
+         ### Check result (-1: bad guess, 0: progress = guessed a sentence part, 1: win = guessed the whole sentence)
+         win = updateState(var_dict, preds)
+         if win == -1:
+             text = preds[-1]
+         elif win == 0:
+             part = var_dict["parts"][var_dict["step"]]
+             text = var_dict["guessed_sentence"] + link_text(part, "something") + " something"
+         elif win == 1:
+             text = var_dict["guessed_sentence"]
+             if var_dict["total_time"] == 0: var_dict["total_time"] = round(time.time() - var_dict["start_time"])
+         return getHTML(var_dict, text, var_dict["win"]), "<h1>"+var_dict["target_sentence"]+"</h1>", var_dict
+     else:
+         return getHTML(var_dict, var_dict["target_sentence"], win=1), "<h1>"+var_dict["target_sentence"]+"</h1>", var_dict
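
The core of `process_img` is zero-shot classification: pad the drawing to a square, resize it to 224×224, encode it with CLIP, and rank the precomputed caption embeddings by cosine similarity. A minimal standalone sketch of that ranking step (assumes the `clip` package from requirements.txt; the candidate captions below are made-up examples, not the ones the game generates):

```python
import torch, clip
from PIL import Image

device = "cuda" if torch.cuda.is_available() else "cpu"
model, preprocess = clip.load("ViT-B/32", device=device)

# Hypothetical candidate captions; the game builds its candidates from words.py.
captions = ["a drawing of a cat", "a drawing of a dog", "a drawing of a house"]

def rank_captions(pil_image):
    image = preprocess(pil_image).unsqueeze(0).to(device)
    tokens = clip.tokenize(captions).to(device)
    with torch.no_grad():
        image_features = model.encode_image(image)
        text_features = model.encode_text(tokens)
        # Normalize both sides so the dot product is a cosine similarity.
        image_features /= image_features.norm(dim=-1, keepdim=True)
        text_features /= text_features.norm(dim=-1, keepdim=True)
        sims = (text_features @ image_features.T).squeeze(-1)
    order = sims.argsort(descending=True).tolist()
    return [(captions[i], sims[i].item()) for i in order]

# Example: rank_captions(Image.open("sketch.png").convert("RGB"))
```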
infos.txt ADDED
@@ -0,0 +1,12 @@
+ Tip: Filling shapes can help!
+ Tip: Drawing context elements can help!
+ Tip: Big drawings are more likely to work!
+ Tip: Adding a face to objects doing actions helps!
+ Tip: Add a musical note to help CLIP guess instruments!
+ Tip: Add a fork to help CLIP guess food!
+ Did you know? CLIP has multimodal neurons!
+ Did you know? The paper introducing CLIP was published in 2021.
+ Did you know? CLIP stands for 'Contrastive Language-Image Pre-training'.
+ Did you know? DALL·E 2 can generate images from text using CLIP's embeddings!
+ Did you know? Typographic attacks (adding text to images) can change CLIP's predictions!
+ Did you know? CLIP was trained on 400 million (image, text) pairs!
model.py ADDED
@@ -0,0 +1,8 @@
+ ##### Initializes CLIP (from https://github.com/openai/CLIP)
+ import torch, torchvision, clip
+
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+ model, preprocess = clip.load("ViT-B/32", device=device)
+
+ encoder_image = model.encode_image
+ encoder_text = model.encode_text
requirements.txt ADDED
@@ -0,0 +1,7 @@
+ scipy
+ numpy
+ torch
+ ftfy
+ regex
+ tqdm
+ git+https://github.com/openai/CLIP.git
sentence.py ADDED
@@ -0,0 +1,113 @@
+ from model import encoder_text
+ import torch, clip, random
+ import numpy as np
+ device = torch.device("cpu")
+
+ from words import words
+
+ ########## SENTENCE PART #######################################################
+ voyelles = ["a","e","i","o","u"]
+ links = list(words.keys())[1:]
+
+ def link_text(part, nextWord):
+     ### Check if we need to write "... a", "... an", "..."
+     if (len(part["link"]) > 0) and (part["link"][-1] == "a"):
+         voyelleStart = (nextWord[0] in voyelles)
+         plural = (nextWord[-1] == "s" and nextWord[-2] != "s") or (nextWord in ["nothing","hair","vampire teeth","something"])
+     else:
+         voyelleStart, plural = False, False
+     return (part["link"][:-2] if plural else part["link"] + ("n" if voyelleStart else ""))
+
+ def part_text(part):
+     l = link_text(part, part["word"])
+     return l + (" " if len(l) > 0 else "") + part["word"]
+
+ def compute_embeddings(part, var_dict, prefix, batch_size=64):
+     target = part["word"]
+     possibleWords = list(set(words[part["link"]]) - set([target] + var_dict["found_words"]))
+     if len(possibleWords) > (batch_size-1): possibleWords = np.random.choice(list(possibleWords), batch_size-1, replace=False).tolist()
+     possibleWords.append(target)
+     ### Compute all classes & embeddings for the current sentence part
+     part["classes"] = [prefix + link_text(part,w) + (" " if len(link_text(part,w))>0 else "") + w for w in possibleWords]
+     with torch.no_grad():
+         embeddings = encoder_text(clip.tokenize(part["classes"]).to(device))
+         embeddings /= embeddings.norm(dim=-1, keepdim=True)
+     part["embeddings"] = embeddings.tolist()
+
+ ########## SENTENCE ############################################################
+ def iniSentence(var_dict, input="", first_game=False):
+     var_dict["found_words"] = []
+     var_dict["parts"] = []
+     var_dict["step"] = 0
+     prefix = ""
+     N = 2
+
+     if first_game:
+         link = "a drawing of a"
+         part = {"link":link, "word":"cat", "classes":[], "embeddings":[]}
+         var_dict["parts"].append(part)
+         compute_embeddings(part, var_dict, prefix)
+         prefix += part_text(part) + " "
+
+         link = "with a"
+         part = {"link":link, "word":"face", "classes":[], "embeddings":[]}
+         var_dict["parts"].append(part)
+         compute_embeddings(part, var_dict, prefix)
+         prefix += part_text(part) + " "
+     else:
+         ##### Generating Random Sentence
+         link = "a drawing of a"
+         part = {"link":link, "word":np.random.choice(words[link]), "classes":[], "embeddings":[]}
+         var_dict["parts"].append(part)
+         compute_embeddings(part, var_dict, prefix)
+         prefix += part_text(part) + " "
+
+         for i in range(N-1):
+             link = np.random.choice(links)
+             part = {"link":link, "word":np.random.choice(words[link][1:]), "classes":[], "embeddings":[]}
+             var_dict["parts"].append(part)
+             compute_embeddings(part, var_dict, prefix)
+             prefix += part_text(part) + " "
+
+     var_dict["target_sentence"] = prefix[:-1]  # Target sentence is the prefix without the trailing space
+     setState(var_dict)
+     return var_dict["target_sentence"]
+
+ def prevState(var_dict):
+     if len(var_dict["prev_steps"]) > 0: var_dict["step"] = var_dict["prev_steps"].pop(-1)
+     else: var_dict["step"] = 0
+     var_dict["revertedState"] = True
+     setState(var_dict)
+
+ def setState(var_dict):
+     var_dict["found_words"] = var_dict["found_words"][:var_dict["step"]]
+     var_dict["guessed_sentence"] = ""
+     for i in range(var_dict["step"]):
+         var_dict["guessed_sentence"] += part_text(var_dict["parts"][i]) + " "
+
+ def updateState(var_dict, preds):
+     if not var_dict["revertedState"]: var_dict["prev_steps"].append(var_dict["step"])
+     else: var_dict["revertedState"] = False
+
+     ### Check if the current part has been guessed
+     part = var_dict["parts"][var_dict["step"]]
+
+     idx_of_nothing = -1
+     if ("nothing" in preds[0]): idx_of_nothing = 0
+     elif ("nothing" in preds[1]): idx_of_nothing = 1
+     elif ("nothing" in preds[2]): idx_of_nothing = 2
+
+     idx_of_guess = -1
+     if (part["classes"][-1] == preds[0]): idx_of_guess = 0
+     elif (part["classes"][-1] == preds[1]): idx_of_guess = 1
+     elif (part["classes"][-1] == preds[2]): idx_of_guess = 2
+
+     if not var_dict["win"] and idx_of_guess > idx_of_nothing:
+         var_dict["step"] += 1
+         var_dict["found_words"].append(part["word"])
+         var_dict["win"] = var_dict["step"] == len(var_dict["parts"])
+         setState(var_dict)
+         if var_dict["win"]: return 1
+         else: return 0
+     elif not var_dict["win"]: return -1
+     else: return 1
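
One detail of `updateState` worth spelling out: the candidate list for parts after the first typically includes a "... nothing" caption, and a part only counts as guessed when the target caption outranks that "nothing" caption inside the top-3 (the top-3 list is ordered from least to most likely, so a larger index means a stronger prediction). A toy, standalone re-implementation of just that acceptance rule (illustrative names only, not the app's API):

```python
def first_index(preds, test):
    # Index of the first prediction satisfying `test`, or -1 if none does.
    return next((i for i, p in enumerate(preds) if test(p)), -1)

def part_guessed(preds, target_class):
    idx_of_nothing = first_index(preds, lambda p: "nothing" in p)
    idx_of_guess = first_index(preds, lambda p: p == target_class)
    return idx_of_guess > idx_of_nothing

# preds are ordered from least to most likely, as in process_img
preds = ["a cat with a hat", "a cat with nothing", "a cat with a face"]
print(part_guessed(preds, "a cat with a face"))  # True: the target outranks "nothing"
print(part_guessed(preds, "a cat with a hat"))   # False: "nothing" outranks the target
```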
style.css ADDED
@@ -0,0 +1,128 @@
+
+ @-webkit-keyframes winAnim {
+     from { text-shadow: 0px 0px 15px rgb(250,150,0,0); font-size: 25px; }
+     to { text-shadow: 0px 0px 15px rgb(250,150,0,0.75); font-size: 30px; }
+ }
+
+ @keyframes winAnim {
+     from { text-shadow: 0px 0px 15px rgb(250,150,0,0); font-size: 25px; }
+     to { text-shadow: 0px 0px 15px rgb(250,150,0,0.75); font-size: 30px; }
+ }
+
+ #test {
+     font-weight: bold;
+ }
+
+ @-webkit-keyframes prediction {
+     from { opacity: 0; }
+     to { opacity: 1; }
+ }
+
+ @keyframes prediction {
+     from { opacity: 0; }
+     to { opacity: 1; }
+ }
+
+ div#prediction {
+     display: block;
+     width: 45vw;
+     height: 100%;
+     text-align: center;
+     margin-top: 100px;
+     margin-left: auto;
+     margin-right: auto;
+ }
+
+ div#prediction p:not(#win) {
+     font-size: 25px;
+     display: inline-block;
+     margin: auto;
+     position: relative;
+     top: 48%;
+     position-anchor: 50% 50%;
+     text-align: center;
+     transform: translate(0, -50%);
+     -webkit-animation: prediction 0.25s;
+     animation: prediction 0.25s;
+ }
+
+ div#prediction p#win {
+     font-weight: bold;
+     -webkit-animation: winAnim 0.5s;
+     animation: winAnim 0.5s;
+     font-size: 30px;
+     text-shadow: 0px 0px 15px rgb(250,150,0,0.75);
+ }
+
+ div#prediction p#infos {
+     font-weight: bold;
+     -webkit-animation: winAnim 0.5s;
+     animation: winAnim 0.5s;
+     font-size: 30px;
+     text-shadow: 0px 0px 15px rgb(250,150,0,0.75);
+ }
+
+ span {
+     font-weight: bold;
+     color: rgb(250,150,0);
+ }
+
+ h1 {
+     display: block; font-size: 30px; font-weight: bold; width: 100%; text-align: center; margin-bottom: 15px;
+ }
+
+ @-webkit-keyframes loading {
+     from { color: rgb(250,150,0,0.25); }
+     to { color: rgb(250,150,0,1); }
+ }
+
+ @keyframes loading {
+     from { color: rgb(250,150,0,0.25); }
+     to { color: rgb(250,150,0,1); }
+ }
+
+ h1#loading {
+     margin-bottom: 15px;
+     display: block;
+     width: 100%;
+     vertical-align: center;
+     text-align: center;
+
+     font-size: 30px;
+     font-weight: bold;
+
+     -webkit-animation: loading 10s;
+     animation: loading 10s;
+     color: rgb(250,150,0,1);
+ }
+
+ @-webkit-keyframes processing {
+     from { opacity: 1; }
+     to { opacity: 0; }
+ }
+
+ @keyframes processing {
+     from { opacity: 1; }
+     to { opacity: 0; }
+ }
+
+ p#processing {
+     -webkit-animation: loading 3s;
+     animation: loading 3s;
+     color: rgb(250,150,0);
+     font-weight: bold;
+
+     font-size: 50px;
+     display: block;
+     margin: auto;
+     text-align: center;
+     width: 100%;
+ }
words.py ADDED
@@ -0,0 +1,32 @@
+ ################################################################################
+ # A DRAWING OF A ...
+ shapes = ["triangle","square","circle","heart","star","diamond"]
+ animals = ["cat","dog","duck","bee","butterfly","bird","pig","cow","fish","frog","shark","snake","mouse","monkey","snail"]
+ objects = ["wine glass","eye","plane","spoon","basket ball","chair","pen","computer","hat","soccer ball","phone","sword","axe","umbrella","bell","dumbbell","scissors","fork","bag","clock","key","shopping cart","car","boat","house","mug","sun","moon","atom","hand"]
+ plants = ["tree","flower","leaf","palm tree","mushroom"]  # I know that mushrooms are not plants, stop coming to my house
+ food = ["donut","coconut","banana","apple","bottle","sausage","icecream","burger","egg","lollypop","pizza"]
+ instruments = ["drum","guitar","piano","flute","trumpet","accordion"]
+
+ # FEELING ...
+ feelings = ["neutral","happy","sad","angry","surprised","thirsty","sleepy","hungry","love","curious","evil"]
+
+ # WITH A ...
+ attributes = ["face","mustache","muscles","vampire teeth","hair","eye","mouth","scar"]
+
+ # WEARING A ...
+ clothings = ["glasses","sunglasses","hat","socks","eye patch","pants","tee-shirt","scarf"]
+
+ # ...
+ others = ["that is talking","that is dancing","that is singing","and its clone","at the beach","at a forest"]
+
+ words = {
+     "a drawing of a": shapes + animals + objects + instruments + plants + food,
+     "with a": ["nothing"] + attributes + clothings,
+     "wearing a": ["nothing"] + clothings,
+     "eating a": ["nothing"] + food,
+     "playing the": ["nothing"] + instruments,
+     "holding a": ["nothing"] + shapes + animals + objects + instruments + plants + food,
+     "feeling": ["nothing"] + feelings,
+     "": ["and nothing else"] + others
+ }
+ ################################################################################
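
For context, target sentences are built by chaining entries from this dict: the first part is always drawn from the "a drawing of a" list, and later parts pick a random link plus one of its words (skipping the leading "nothing"-style placeholder). A simplified standalone sketch of that sampling, assuming it is run next to words.py; article handling ("a"/"an"/plural) is omitted here, and the full logic lives in sentence.py's iniSentence and link_text:

```python
import random
from words import words  # assumes this script sits next to words.py

def random_sentence(extra_parts=1):
    first = "a drawing of a"
    pieces = [first, random.choice(words[first])]
    links = [k for k in words if k != first]
    for _ in range(extra_parts):
        link = random.choice(links)
        word = random.choice(words[link][1:])  # skip the "nothing"-style placeholder
        pieces += [link, word] if link else [word]
    return " ".join(pieces)

print(random_sentence())  # e.g. "a drawing of a cat wearing a hat"
```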