FoodDesert committed
Commit 5903239 · 1 parent: d1d1879

Upload predict_artist.ipynb


Sample code for choosing an artist given a topic.
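For context, here is a minimal, self-contained sketch of the approach the notebook takes: one TF-IDF "document" per artist built from that artist's tags (tokenized on commas so multi-word tags stay intact), then cosine similarity against a query tag set to rank artists. The toy_artist_tags dictionary and the query string below are made up for illustration; the notebook itself builds these from e621 JSON dumps and the "by <artist>" tags.

# Sketch only: toy in-memory data instead of the e621 JSON dumps used in the notebook.
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

toy_artist_tags = {
    "by artist_a": ["airplane", "sky", "clouds", "jet"],
    "by artist_b": ["forest", "river", "deer"],
    "by artist_c": ["airplane", "pilot", "cockpit"],
}

# Tags are comma-joined and tokenized on commas, mirroring the notebook's vectorizer.
vectorizer = TfidfVectorizer(token_pattern=r"[^,]+")
X_artist = vectorizer.fit_transform([",".join(tags) for tags in toy_artist_tags.values()])
artist_names = list(toy_artist_tags)

# Build the query the same way the notebook does: split on commas and strip whitespace.
query_tags = [t.strip() for t in "airplane, sky".split(",")]
X_query = vectorizer.transform([",".join(query_tags)])
scores = cosine_similarity(X_query, X_artist)[0]

# Rank artists by similarity to the query tag set, highest first.
for name, score in sorted(zip(artist_names, scores), key=lambda x: -x[1]):
    print(f"{name}: {score:.3f}")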

Files changed (1)
  1. predict_artist.ipynb +314 -0
predict_artist.ipynb ADDED
@@ -0,0 +1,314 @@
+ {
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "id": "55c95870",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from e621_utilities import construct_text_description_from_json_entry\n",
+ "import json\n",
+ "from math import log\n",
+ "import random\n",
+ "import numpy as np\n",
+ "from collections import Counter\n",
+ "\n",
+ "\n",
+ "IMAGE_COUNT = None\n",
+ "INPUT_JSONS = ['D:/PythonExperiments/e621_high_score.json', 'D:/PythonExperiments/e621_low_score.json']\n",
+ "\n",
+ "\n",
+ "def score_post_log_favs(post):\n",
+ "    return min(1.0, (log(int(post['fav_count'])+1) / 10))\n",
+ "\n",
+ "def load_tag_sets(data_list):\n",
+ "    scores = []\n",
+ "    text_descriptions = []\n",
+ "    artists = []\n",
+ "    for data in data_list:\n",
+ "        text_description = construct_text_description_from_json_entry(data)\n",
+ "        artist, text_description = extract_artist(text_description)\n",
+ " \n",
+ "        score = score_post_log_favs(data)\n",
+ "        score_int = round(score * 10)\n",
+ "        text_description.append(f\"score:{score_int}\")\n",
+ " \n",
+ "        text_descriptions.append(text_description)\n",
+ "        artists.append(artist)\n",
+ "    return text_descriptions, artists\n",
+ "\n",
+ "def load_data(input_json):\n",
+ "    with open(input_json) as f:\n",
+ "        data_list = json.load(f)[:IMAGE_COUNT]\n",
+ "    # Build the tag lists and artist labels from the loaded entries\n",
+ "    return load_tag_sets(data_list)\n",
+ "\n",
+ "def extract_artist(tags):\n",
+ "    for tag in tags:\n",
+ "        if tag.startswith('by '):\n",
+ "            tags.remove(tag)\n",
+ "            return tag, tags\n",
+ "    return None, tags\n",
+ "\n",
+ "# Each of these variables is a list; each element represents one instance.\n",
+ "# In text_descriptions, each element is a list of strings, where each string is a tag associated with the instance.\n",
+ "# Each instance's quantized score is appended to its tag list as a score:N tag.\n",
+ "text_descriptions = []\n",
+ "artists = []\n",
+ "for input_json in INPUT_JSONS:\n",
+ "    sub_text_descriptions, sub_artists = load_data(input_json)\n",
+ "    text_descriptions.extend(sub_text_descriptions)\n",
+ "    artists.extend(sub_artists)\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "id": "91c66b57",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Artist Count Before Filtering: 57134\n",
+ "Artist Count After Filtering: 698\n"
+ ]
+ }
+ ],
+ "source": [
+ "# Count the occurrences of each artist\n",
+ "artist_count = Counter(artists)\n",
+ "\n",
+ "# Filter the data to keep only artists with 100 or more occurrences\n",
+ "min_occurrences = 100\n",
+ "filtered_text_descriptions = []\n",
+ "filtered_artists = []\n",
+ "\n",
+ "for tags, artist in zip(text_descriptions, artists):\n",
+ "    if artist_count[artist] >= min_occurrences:\n",
+ "        filtered_text_descriptions.append(tags)\n",
+ "        filtered_artists.append(artist)\n",
+ "\n",
+ "# Print the result\n",
+ "print(f\"Artist Count Before Filtering: {len(set(artists))}\")\n",
+ "print(f\"Artist Count After Filtering: {len(set(filtered_artists))}\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "id": "acf35591",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from collections import defaultdict\n",
+ "from sklearn.feature_extraction.text import TfidfVectorizer\n",
+ "from sklearn.metrics.pairwise import cosine_similarity\n",
+ "\n",
+ "\n",
+ "# Combine the tags of all images for each artist\n",
+ "artist_tags = defaultdict(list)\n",
+ "for tags, artist in zip(filtered_text_descriptions, filtered_artists):\n",
+ "    artist_tags[artist].extend(tags)\n",
+ "\n",
+ "# Compute the TF-IDF representation for each artist\n",
+ "vectorizer = TfidfVectorizer(token_pattern=r'[^,]+')\n",
+ "X_artist = vectorizer.fit_transform([','.join(tags) for tags in artist_tags.values()])\n",
+ "artist_names = list(artist_tags.keys())"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "a232e088",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Given a new image with a tag list (excluding the artist name)\n",
+ "new_image_tags = []\n",
+ "new_tags_string = \"airplane\"\n",
+ "new_image_tags.extend(tag.strip() for tag in new_tags_string.split(\",\"))\n",
+ "\n",
+ "unseen_tags = set(new_image_tags) - set(vectorizer.vocabulary_.keys())\n",
+ "print(f'Unseen Tags:{unseen_tags}')\n",
+ "\n",
+ "# Compute the TF-IDF representation for the new image\n",
+ "X_new_image = vectorizer.transform([','.join(new_image_tags)])\n",
+ "\n",
+ "# Compute the cosine similarity between the new image and each artist\n",
+ "similarities = cosine_similarity(X_new_image, X_artist)[0]\n",
+ "\n",
+ "# Rank the artists by their similarity scores and keep the top_n highest and lowest\n",
+ "top_n = 20\n",
+ "\n",
+ "# Top artists\n",
+ "top_artist_indices = np.argsort(similarities)[-top_n:][::-1]\n",
+ "top_artists = [(artist_names[i], similarities[i]) for i in top_artist_indices]\n",
+ "\n",
+ "# Bottom artists\n",
+ "bottom_artist_indices = np.argsort(similarities)[:top_n]\n",
+ "bottom_artists = [(artist_names[i], similarities[i]) for i in bottom_artist_indices]\n",
+ "\n",
+ "# Get the artist names from the top_artists and bottom_artists lists\n",
+ "top_artist_names = [artist for artist, _ in top_artists]\n",
+ "bottom_artist_names = [artist for artist, _ in bottom_artists]\n",
+ "\n",
+ "# Print the top artists with rank numbers and similarity scores\n",
+ "print(f\"Top {top_n} artists:\")\n",
+ "for rank, (artist, score) in enumerate(top_artists, start=1):\n",
+ "    print(f\"{rank}. {artist} - similarity score: {score:.4f}\")\n",
+ "\n",
+ "# Print the top artists as a comma-separated list\n",
+ "print(f\"\\nTop {top_n} artists:\", \", \".join(str(artist) for artist in top_artist_names))\n",
+ "\n",
+ "# Print the bottom artists with rank numbers and similarity scores\n",
+ "print(f\"\\nBottom {top_n} artists:\")\n",
+ "for rank, (artist, score) in enumerate(bottom_artists, start=1):\n",
+ "    print(f\"{rank}. {artist} - similarity score: {score:.4f}\")\n",
+ "\n",
+ "# Print the bottom artists as a comma-separated list\n",
+ "print(f\"\\nBottom {top_n} artists:\", \", \".join(str(artist) for artist in bottom_artist_names))\n",
+ "\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "8dbb05e8",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "id": "9730cb16",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import pandas as pd\n",
+ "\n",
+ "def calculate_and_save_top_artists(tags, vectorizer, X_artist, artist_names, top_n):\n",
+ "    for tag in tags:\n",
+ "        new_image_tags = [t.strip() for t in tag.split(\",\")]\n",
+ "\n",
+ "        # Compute the TF-IDF representation for the new image\n",
+ "        X_new_image = vectorizer.transform([','.join(new_image_tags)])\n",
+ "\n",
+ "        # Compute the cosine similarity between the new image and each artist\n",
+ "        similarities = cosine_similarity(X_new_image, X_artist)[0]\n",
+ "\n",
+ "        # Rank the artists by their similarity scores and select the top\n",
+ "        top_artist_indices = np.argsort(similarities)[-top_n:][::-1]\n",
+ "        top_artists = [(artist_names[i], similarities[i]) for i in top_artist_indices]\n",
+ "\n",
+ "        # Create dataframes for artists and similarities\n",
+ "        artist_df = pd.DataFrame({tag: [artist for artist, _ in top_artists]}).T\n",
+ "        similarity_df = pd.DataFrame({tag: [f\"{artist}({round(similarity, 3)})\" for artist, similarity in top_artists]}).T\n",
+ "\n",
+ "        # Append the data to csv files\n",
+ "        artist_df.to_csv('top_artists.csv', mode='a', header=False)\n",
+ "        similarity_df.to_csv('top_artists_similarity.csv', mode='a', header=False)\n",
+ "\n",
+ " \n",
+ "df = pd.read_csv('all_tags.csv')\n",
+ "unique_sorted_tags = df.iloc[:, 0].tolist()\n",
+ "# Run the function for every tag listed in all_tags.csv\n",
+ "calculate_and_save_top_artists(unique_sorted_tags, vectorizer, X_artist, artist_names, 20)\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "id": "d38f92b2",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Skipping tag ':3' due to invalid characters in the name.\n",
+ "Skipping tag ':<' due to invalid characters in the name.\n",
+ "Skipping tag ':d' due to invalid characters in the name.\n",
+ "Skipping tag ':o' due to invalid characters in the name.\n",
+ "Skipping tag '<3' due to invalid characters in the name.\n",
+ "Skipping tag '<3 censor' due to invalid characters in the name.\n",
+ "Skipping tag '<3 eyes' due to invalid characters in the name.\n",
+ "Skipping tag '<3 pupils' due to invalid characters in the name.\n",
+ "Skipping tag '?!' due to invalid characters in the name.\n",
+ "Skipping tag 'american dragon: jake long' due to invalid characters in the name.\n",
+ "Skipping tag 'dust: an elysian tail' due to invalid characters in the name.\n",
+ "Skipping tag 'five nights at freddy's: security breach' due to invalid characters in the name.\n",
+ "Skipping tag 'mao mao: heroes of pure heart' due to invalid characters in the name.\n",
+ "Skipping tag 'spirit: stallion of the cimarron' due to invalid characters in the name.\n"
+ ]
+ }
+ ],
+ "source": [
+ "import pandas as pd\n",
+ "import os\n",
+ "\n",
+ "# Load the csv file (it was appended without a header, so read it with header=None)\n",
+ "df = pd.read_csv('top_artists.csv', header=None)\n",
+ "\n",
+ "# Directory to store the txt files\n",
+ "output_dir = 'e6ta'\n",
+ "os.makedirs(output_dir, exist_ok=True)  # Make sure the directory exists\n",
+ "\n",
+ "# Characters that are not allowed in filenames\n",
+ "invalid_chars = ['/', '\\\\', ':', '*', '?', '\"', '<', '>', '|']\n",
+ "\n",
+ "# Loop through the DataFrame rows\n",
+ "for index, row in df.iterrows():\n",
+ "    # Get the name for the file (replace spaces with '_')\n",
+ "    filename = row[0].replace(' ', '_') + '.txt'\n",
+ " \n",
+ "    # Check if the filename contains any invalid characters\n",
+ "    if any(char in filename for char in invalid_chars):\n",
+ "        print(f\"Skipping tag '{row[0]}' due to invalid characters in the name.\")\n",
+ "        continue\n",
+ "\n",
+ "    # Get the first 10 tags, ignore any that are just whitespace\n",
+ "    tags = [str(tag).strip() for tag in row[1:11] if str(tag).strip()]\n",
+ "\n",
+ "    # Create the txt file and write the tags\n",
+ "    with open(os.path.join(output_dir, filename), 'w') as f:\n",
+ "        f.write('\\n'.join(tags))\n",
+ "        f.write('\\n')  # Add a newline at the end of the file\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "879f5463",
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.10.9"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+ }