{ "cells": [ { "cell_type": "code", "execution_count": 2, "id": "55c95870", "metadata": {}, "outputs": [], "source": [ "from e621_utilities import construct_text_description_from_json_entry\n", "import json\n", "from math import log\n", "import random\n", "import numpy as np\n", "from collections import Counter\n", "\n", "\n", "IMAGE_COUNT=None\n", "INPUT_JSONS=['D:/PythonExperiments/e621_high_score.json','D:/PythonExperiments/e621_low_score.json']\n", "\n", "\n", "def score_post_log_favs(post):\n", " return min(1.0, (log(int(post['fav_count'])+1) / 10))\n", "\n", "def load_tag_sets(data_list):\n", " scores = []\n", " text_descriptions = []\n", " artists = []\n", " for data in data_list:\n", " text_description = construct_text_description_from_json_entry(data)\n", " artist, text_description = extract_artist(text_description)\n", " \n", " score =score_post_log_favs(data)\n", " score_int = round(score * 10)\n", " text_description.append(f\"score:{score_int}\")\n", " \n", " text_descriptions.append(text_description)\n", " artists.append(artist)\n", " return text_descriptions, artists\n", "\n", "def load_data(input_json):\n", " with open(input_json) as f:\n", " data_list = json.load(f)[:IMAGE_COUNT] \n", " # Load scores and tag sets from regular Python variables\n", " return load_tag_sets(data_list)\n", "\n", "def extract_artist(tags):\n", " for tag in tags:\n", " if tag.startswith('by '):\n", " tags.remove(tag)\n", " return tag, tags\n", " return None, tags\n", "\n", "#each of these variables is a list. Each element of the list represents one instance\n", "#in text_descriptions, a single element is a list of strings, where each string is a tag associated with the instance.\n", "#in scores, a single element is the score associated with an instance\n", "text_descriptions = []\n", "artists = []\n", "for input_json in INPUT_JSONS:\n", " sub_text_descriptions, sub_artists = load_data(input_json)\n", " text_descriptions.extend(sub_text_descriptions)\n", " artists.extend(sub_artists)\n" ] }, { "cell_type": "code", "execution_count": 3, "id": "91c66b57", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Artist Count Before Filtering: 57134\n", "Artist Count After Filtering: 698\n" ] } ], "source": [ "# Count the occurrences of each artist\n", "artist_count = Counter(artists)\n", "\n", "# Filter the data to keep only artists with 100 or more occurrences\n", "min_occurrences = 100\n", "filtered_text_descriptions = []\n", "filtered_artists = []\n", "\n", "for tags, artist in zip(text_descriptions, artists):\n", " if artist_count[artist] >= min_occurrences:\n", " filtered_text_descriptions.append(tags)\n", " filtered_artists.append(artist)\n", "\n", "# Print the result\n", "print(f\"Artist Count Before Filtering: {len(set(artists))}\")\n", "print(f\"Artist Count After Filtering: {len(set(filtered_artists))}\")" ] }, { "cell_type": "code", "execution_count": 4, "id": "acf35591", "metadata": {}, "outputs": [], "source": [ "from collections import defaultdict\n", "from sklearn.feature_extraction.text import TfidfVectorizer\n", "from sklearn.metrics.pairwise import cosine_similarity\n", "\n", "\n", "# Combine the tags of all images for each artist\n", "artist_tags = defaultdict(list)\n", "for tags, artist in zip(filtered_text_descriptions, filtered_artists):\n", " artist_tags[artist].extend(tags)\n", "\n", "# Compute the TF-IDF representation for each artist\n", "vectorizer = TfidfVectorizer(token_pattern=r'[^,]+')\n", "X_artist = vectorizer.fit_transform([','.join(tags) for tags in artist_tags.values()])\n", "artist_names = list(artist_tags.keys())" ] }, { "cell_type": "code", "execution_count": null, "id": "a232e088", "metadata": {}, "outputs": [], "source": [ "# Given a new image with a tag list (excluding the artist name)\n", "new_image_tags = []\n", "new_tags_string = \"airplane\"\n", "new_image_tags.extend(tag.strip() for tag in new_tags_string.split(\",\"))\n", "\n", "unseen_tags = set(new_image_tags) - set(vectorizer.vocabulary_.keys())\n", "print(f'Unseen Tags:{unseen_tags}')\n", "\n", "# Compute the TF-IDF representation for the new image\n", "X_new_image = vectorizer.transform([','.join(new_image_tags)])\n", "\n", "# Compute the cosine similarity between the new image and each artist\n", "similarities = cosine_similarity(X_new_image, X_artist)[0]\n", "\n", "# Rank the artists by their similarity scores and select the top 10\n", "top_n = 20\n", "\n", "# Top artists\n", "top_artist_indices = np.argsort(similarities)[-top_n:][::-1]\n", "top_artists = [(artist_names[i], similarities[i]) for i in top_artist_indices]\n", "\n", "# Bottom artists\n", "bottom_artist_indices = np.argsort(similarities)[:top_n]\n", "bottom_artists = [(artist_names[i], similarities[i]) for i in bottom_artist_indices]\n", "\n", "# Get the artist names from the top_artists and bottom_artists lists\n", "top_artist_names = [artist for artist, _ in top_artists]\n", "bottom_artist_names = [artist for artist, _ in bottom_artists]\n", "\n", "# Print the top 10 artists with rank numbers and similarity scores\n", "print(\"Top 10 artists:\")\n", "for rank, (artist, score) in enumerate(top_artists, start=1):\n", " print(f\"{rank}. {artist} - similarity score: {score:.4f}\")\n", "\n", "# Print the top 10 artists as a comma-separated list\n", "print(\"\\nTop 10 artists:\", \", \".join(str(artist) for artist in top_artist_names))\n", "\n", "# Print the bottom 10 artists with rank numbers and similarity scores\n", "print(\"\\nBottom 10 artists:\")\n", "for rank, (artist, score) in enumerate(bottom_artists, start=1):\n", " print(f\"{rank}. {artist} - similarity score: {score:.4f}\")\n", "\n", "# Print the bottom 10 artists as a comma-separated list\n", "print(\"\\nBottom 10 artists:\", \", \".join(str(artist) for artist in bottom_artist_names))\n", "\n" ] }, { "cell_type": "code", "execution_count": null, "id": "8dbb05e8", "metadata": {}, "outputs": [], "source": [ "\n" ] }, { "cell_type": "code", "execution_count": 6, "id": "9730cb16", "metadata": {}, "outputs": [], "source": [ "import pandas as pd\n", "\n", "def calculate_and_save_top_artists(tags, vectorizer, X_artist, artist_names, top_n):\n", " for tag in tags:\n", " new_image_tags = [tag.strip() for tag in tag.split(\",\")]\n", "\n", " # Compute the TF-IDF representation for the new image\n", " X_new_image = vectorizer.transform([','.join(new_image_tags)])\n", "\n", " # Compute the cosine similarity between the new image and each artist\n", " similarities = cosine_similarity(X_new_image, X_artist)[0]\n", "\n", " # Rank the artists by their similarity scores and select the top\n", " top_artist_indices = np.argsort(similarities)[-top_n:][::-1]\n", " top_artists = [(artist_names[i], similarities[i]) for i in top_artist_indices]\n", "\n", " # Create dataframes for artists and similarities\n", " artist_df = pd.DataFrame({tag: [artist for artist, _ in top_artists]}).T\n", " similarity_df = pd.DataFrame({tag: [f\"{artist}({round(similarity, 3)})\" for artist, similarity in top_artists]}).T\n", "\n", " # Append the data to csv files\n", " artist_df.to_csv('top_artists.csv', mode='a', header=False)\n", " similarity_df.to_csv('top_artists_similarity.csv', mode='a', header=False)\n", "\n", " \n", "df = pd.read_csv('all_tags.csv')\n", "unique_sorted_tags = df.iloc[:, 0].tolist()\n", "# Use the function for all keys in the vocabulary\n", "calculate_and_save_top_artists(unique_sorted_tags, vectorizer, X_artist, artist_names, 20)\n" ] }, { "cell_type": "code", "execution_count": 3, "id": "d38f92b2", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Skipping tag ':3' due to invalid characters in the name.\n", "Skipping tag ':<' due to invalid characters in the name.\n", "Skipping tag ':d' due to invalid characters in the name.\n", "Skipping tag ':o' due to invalid characters in the name.\n", "Skipping tag '<3' due to invalid characters in the name.\n", "Skipping tag '<3 censor' due to invalid characters in the name.\n", "Skipping tag '<3 eyes' due to invalid characters in the name.\n", "Skipping tag '<3 pupils' due to invalid characters in the name.\n", "Skipping tag '?!' due to invalid characters in the name.\n", "Skipping tag 'american dragon: jake long' due to invalid characters in the name.\n", "Skipping tag 'dust: an elysian tail' due to invalid characters in the name.\n", "Skipping tag 'five nights at freddy's: security breach' due to invalid characters in the name.\n", "Skipping tag 'mao mao: heroes of pure heart' due to invalid characters in the name.\n", "Skipping tag 'spirit: stallion of the cimarron' due to invalid characters in the name.\n" ] } ], "source": [ "import pandas as pd\n", "import os\n", "\n", "# Load the csv file\n", "df = pd.read_csv('top_artists.csv')\n", "\n", "# Directory to store the txt files\n", "output_dir = 'e6ta'\n", "os.makedirs(output_dir, exist_ok=True) # Make sure the directory exists\n", "\n", "# Characters that are not allowed in filenames\n", "invalid_chars = ['/', '\\\\', ':', '*', '?', '\"', '<', '>', '|']\n", "\n", "# Loop through the DataFrame rows\n", "for index, row in df.iterrows():\n", " # Get the name for the file (replace spaces with '_')\n", " filename = row[0].replace(' ', '_') + '.txt'\n", " \n", " # Check if the filename contains any invalid characters\n", " if any(char in filename for char in invalid_chars):\n", " print(f\"Skipping tag '{row[0]}' due to invalid characters in the name.\")\n", " continue\n", "\n", " # Get the first 10 tags, ignore any that are just whitespace\n", " tags = [str(tag).strip() for tag in row[1:11] if str(tag).strip()]\n", "\n", " # Create the txt file and write the tags\n", " with open(os.path.join(output_dir, filename), 'w') as f:\n", " f.write('\\n'.join(tags))\n", " f.write('\\n') # Add a newline at the end of the file\n" ] }, { "cell_type": "code", "execution_count": null, "id": "879f5463", "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.9" } }, "nbformat": 4, "nbformat_minor": 5 }