Datasets:

Languages:
English
Tags:
Not-For-All-Audiences
License:
Gaeros committed on
Commit
3b16b7a
1 Parent(s): 4918061

utils: tag normalization

Browse files
data/implications.json.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:79d175bb5d744526905b660f41088c9ab074be060f0d710b68e68b549fe42632
3
+ size 234987
data/implications_rej.json.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cdea2c2f93cf397fee184742ac285f9fdf8a334005754b9133ea472112bdb259
3
+ size 99047
e6db/utils/__init__.py CHANGED
@@ -2,11 +2,13 @@
2
  from pathlib import Path
3
  import gzip
4
  import json
 
 
5
 
6
  tag_categories = [
7
  "general",
8
  "artist",
9
- "", # Invalid catid
10
  "copyright",
11
  "character",
12
  "species",
@@ -19,7 +21,7 @@ tag_category2id = {v: k for k, v in enumerate(tag_categories) if v}
19
  tag_categories_colors = [
20
  "#b4c7d9",
21
  "#f2ac08",
22
- "black", # Invalid catid
23
  "#d0d",
24
  "#0a0",
25
  "#ed5d1f",
@@ -31,7 +33,7 @@ tag_categories_colors = [
31
  tag_categories_alt_colors = [
32
  "#2e76b4",
33
  "#fbd67f",
34
- "black", # Invalid catid
35
  "#ff5eff",
36
  "#2bff2b",
37
  "#f6b295",
@@ -43,6 +45,13 @@ tag_categories_alt_colors = [
43
 
44
 
45
  def load_tags(data_dir):
 
 
 
 
 
 
 
46
  data_dir = Path(data_dir)
47
  with gzip.open(data_dir / "tags.txt.gz", "rt") as fd:
48
  idx2tag = fd.read().split("\n")
@@ -53,3 +62,206 @@ def load_tags(data_dir):
53
  with gzip.open(data_dir / "tags_categories.bin.gz", "rb") as fp:
54
  tag_categories = fp.read()
55
  return tag2idx, idx2tag, tag_categories
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2
  from pathlib import Path
3
  import gzip
4
  import json
5
+ import warnings
6
+ from typing import Callable, Iterable
7
 
8
  tag_categories = [
9
  "general",
10
  "artist",
11
+ None, # Invalid catid
12
  "copyright",
13
  "character",
14
  "species",
 
21
  tag_categories_colors = [
22
  "#b4c7d9",
23
  "#f2ac08",
24
+ None, # Invalid catid
25
  "#d0d",
26
  "#0a0",
27
  "#ed5d1f",
 
33
  tag_categories_alt_colors = [
34
  "#2e76b4",
35
  "#fbd67f",
36
+ None, # Invalid catid
37
  "#ff5eff",
38
  "#2bff2b",
39
  "#f6b295",
 
45
 
46
 
47
  def load_tags(data_dir):
48
+ """
49
+ Load tag data, returns a tuple `(tag2idx, idx2tag, tag_categories)`
50
+
51
+ * `tag2idx`: dict mapping tag and aliases to numerical ids
52
+ * `idx2tag`: list mapping numerical id to tag string
53
+ * `tag_categories`: byte string mapping numerical id to categories
54
+ """
55
  data_dir = Path(data_dir)
56
  with gzip.open(data_dir / "tags.txt.gz", "rt") as fd:
57
  idx2tag = fd.read().split("\n")
 
62
  with gzip.open(data_dir / "tags_categories.bin.gz", "rb") as fp:
63
  tag_categories = fp.read()
64
  return tag2idx, idx2tag, tag_categories
65
+
66
+
67
def load_implications(data_dir):
    """
    Load implication mappings. Returns a tuple `(implications, implications_rej)`

    * `implications`: dict mapping numerical ids to a list of implied numerical
      ids
    * `implications_rej`: dict mapping tag strings to a list of implied
      numerical ids. Keys in `implications_rej` are tags that have very little
      usage (less than 2 posts) and don't have numerical ids associated with
      them.
    """
    # Accept plain strings too, consistent with `load_tags` (previously a str
    # argument crashed on `data_dir / ...`).
    data_dir = Path(data_dir)
    with gzip.open(data_dir / "implications.json.gz", "rb") as fp:
        implications = json.load(fp)
    # JSON object keys are always strings; restore the integer tag ids
    implications = {int(k): v for k, v in implications.items()}
    with gzip.open(data_dir / "implications_rej.json.gz", "rb") as fp:
        implications_rej = json.load(fp)
    return implications, implications_rej
83
+
84
+
85
+ MapFun = Callable[[str, int | None], str | list[str]]
86
+
87
+
88
+ #
89
+ # WARNING: this API is goofy and will chang soon
90
+ #
91
+ class TagNormalizer:
92
+ """
93
+ Map tag strings to numerical ids, and vice versa.
94
+
95
+ Multiple strings can be mapped to a single id, while each id map to a single
96
+ string. As a result, the encode/decode process can be used to normalize
97
+ tags.
98
+ """
99
+
100
+ def __init__(self, path_or_data: str | Path | tuple[dict, list, bytes]):
101
+ if isinstance(path_or_data, (Path, str)):
102
+ data = load_tags(path_or_data)
103
+ else:
104
+ data = path_or_data
105
+ self.tag2idx, self.idx2tag, self.tag_categories = data
106
+
107
+ def encode(self, tag: str, default=None):
108
+ "Convert tag string to numerical id"
109
+ return self.tag2idx.get(tag, default)
110
+
111
+ def decode(self, tag: int | str):
112
+ "Convert numerical id to tag string"
113
+ if isinstance(tag, str):
114
+ return tag
115
+ return self.idx2tag[tag]
116
+
117
+ def get_reverse_mapping(self):
118
+ """Return a list mapping id -> [ tag strings ]"""
119
+ res = [[] for i in range(len(self.idx2tag))]
120
+ for tag, tid in self.tag2idx.items():
121
+ res[tid].append(tag)
122
+ return res
123
+
124
+ def add_input_mappings(
125
+ self, tags: str | Iterable[str], to_tid: int | str, on_conflict="raise"
126
+ ):
127
+ """Associate tag strings to an id for recognition by `encode`
128
+
129
+ `on_conflict` defines what to do when the tag string is already mapped
130
+ to a different id:
131
+
132
+ * "raise": raise an ValueError (default)
133
+ * "warn": raise a warning
134
+ * "overwrite_rarest": make the tag point to the most frequently used tid
135
+ * "overwrite": silently overwrite the mapping
136
+ * "silent", or any other string: don't set the mapping
137
+ """
138
+ tag2idx = self.tag2idx
139
+ if not isinstance(to_tid, int):
140
+ to_tid = tag2idx[to_tid]
141
+ if isinstance(tags, str):
142
+ tags = (tags,)
143
+ for tag in tags:
144
+ conflict = tag2idx.get(tag, to_tid)
145
+ if conflict != to_tid:
146
+ msg = f"mapping {tag!r}->{self.idx2tag[to_tid]!r}({to_tid}) conflicts with previous mapping {tag!r}->{self.idx2tag[conflict]!r}({conflict})."
147
+ if on_conflict == "raise":
148
+ raise ValueError(msg)
149
+ elif on_conflict == "warn":
150
+ warnings.warn(msg)
151
+ elif on_conflict == "overwrite_rarest" and to_tid > conflict:
152
+ continue
153
+ elif on_conflict != "overwrite":
154
+ continue
155
+ tag2idx[tag] = to_tid
156
+
157
+ def rename_output(self, orig: int | str, dest: str):
158
+ """Change the tag string associated with an id. Used by `decode`."""
159
+ if not isinstance(orig, int):
160
+ orig = self.tag2idx[orig]
161
+ self.idx2tag[orig] = dest
162
+
163
+ def map_inputs(self, mapfun: MapFun, on_conflict="raise") -> "TagNormalizer":
164
+ res = type(self)(({}, self.idx2tag, self.tag_categories))
165
+ for tag, tid in self.tag2idx.items():
166
+ res.add_input_mappings(mapfun(tag, tid), tid, on_conflict=on_conflict)
167
+ return res
168
+
169
+ def map_outputs(self, mapfun: MapFun) -> "TagNormalizer":
170
+ idx2tag_gen = (mapfun(t, i) for i, t in enumerate(self.idx2tag))
171
+ idx2tag = [t if isinstance(t, str) else t[0] for t in idx2tag_gen]
172
+ return type(self)((self.tag2idx, idx2tag, self.tag_categories))
173
+
174
+ def get(self, key: int | str, default=None):
175
+ """
176
+ Returns the string tag associated with a numerical id, or conversely,
177
+ the id associated with a tag.
178
+ """
179
+ if isinstance(key, int):
180
+ idx2tag = self.idx2tag
181
+ if key >= len(idx2tag):
182
+ return default
183
+ return idx2tag[key]
184
+ return self.tag2idx.get(key, default)
185
+
186
+
187
class TagSetNormalizer:
    """Combine a `TagNormalizer` with implication data to encode, normalize
    and strip implied tags from whole tag sets."""

    def __init__(self, path_or_data: str | Path | tuple[TagNormalizer, dict, dict]):
        """Build from a data directory (see `load_tags`/`load_implications`)
        or directly from a `(tag_normalizer, implications, implications_rej)`
        tuple."""
        if isinstance(path_or_data, (Path, str)):
            data = TagNormalizer(path_or_data), *load_implications(path_or_data)
        else:
            data = path_or_data
        self.tag_normalizer, self.implications, self.implications_rej = data

    def map_implications_rej(
        self, mapfun: MapFun, on_conflict="raise"
    ) -> "TagSetNormalizer":
        """Rewrite the keys of `implications_rej` through `mapfun` and return
        a new instance.

        `on_conflict` ("raise"/"warn"/"overwrite"/anything else = keep old)
        controls what happens when two source tags map to the same new key
        with different implied ids.
        """
        implications_rej: dict[str, list[str]] = {}
        for tag_string, implied_ids in self.implications_rej.items():
            new_tag_strings = mapfun(tag_string, None)
            if isinstance(new_tag_strings, str):
                # BUGFIX: a bare string return used to be iterated char by
                # char; wrap it like TagNormalizer.add_input_mappings does.
                new_tag_strings = (new_tag_strings,)
            for new_tag_string in new_tag_strings:
                conflict = implications_rej.get(new_tag_string, implied_ids)
                if conflict != implied_ids:
                    # BUGFIX: report the mapped key (the dict is keyed by
                    # new_tag_string), not the pre-mapping source tag.
                    msg = f"mapping {new_tag_string!r}->{implied_ids} conflicts with previous mapping {new_tag_string!r}->{conflict}."
                    if on_conflict == "raise":
                        raise ValueError(msg)
                    elif on_conflict == "warn":
                        warnings.warn(msg)
                    elif on_conflict != "overwrite":
                        continue
                implications_rej[new_tag_string] = implied_ids

        return type(self)((self.tag_normalizer, self.implications, implications_rej))

    # Backward-compatible alias for the original misspelled method name.
    map_implicaitons_rej = map_implications_rej

    def map_tags(
        self, mapfun: MapFun, map_input=True, map_output=True, on_conflict="raise"
    ) -> "TagSetNormalizer":
        """Apply a function to all tag strings.

        The provided function will be run on:

        * The list of output tag strings,
        * Keys from the dictionary mapping strings to ids, contains canonical
          tag and aliases,
        * Implication source tags that are not used frequently enough to get an
          id assigned (less than twice).

        The function should return a list, where the first string is the
        canonical tag used in the output, the others are additional aliases
        used for recognizing the tag.
        """
        tag_normalizer = self.tag_normalizer
        if map_input:
            tag_normalizer = tag_normalizer.map_inputs(mapfun, on_conflict=on_conflict)
        if map_output:
            tag_normalizer = tag_normalizer.map_outputs(mapfun)
        res = type(self)((tag_normalizer, self.implications, self.implications_rej))
        if map_input:
            res = res.map_implications_rej(mapfun, on_conflict=on_conflict)
        return res

    def encode(self, tags: Iterable[str], keep_implied=False):
        """
        Encode a list of string as numerical ids and strip implied tags.

        Unknown tags are returned as strings.

        Returns :

        * a list of tag ids and unknown tag strings,
        * a set of implied tag ids.
        """
        implied = set()
        res = []
        for tag in tags:
            tag = self.tag_normalizer.encode(tag, tag)
            implied.update(
                self.implications.get(tag, ())
                if isinstance(tag, int)
                else self.implications_rej.get(tag, ())
            )
            res.append(tag)
        if not keep_implied:
            res = [t for t in res if t not in implied]
        return res, implied

    def decode(self, tags):
        """Convert a list of ids (and unknown tag strings) back to strings."""
        return [self.tag_normalizer.decode(t) for t in tags]
notebooks/Normalize tags T2I dataset.ipynb ADDED
@@ -0,0 +1,417 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": 1,
6
+ "id": "cfb2d5b1-bcc3-423f-959a-cc9070546c9f",
7
+ "metadata": {},
8
+ "outputs": [],
9
+ "source": [
10
+ "%load_ext autoreload\n",
11
+ "%autoreload explicit\n",
12
+ "\n",
13
+ "from itertools import chain\n",
14
+ "from collections import Counter\n",
15
+ "from functools import cache\n",
16
+ "import re\n",
17
+ "from pathlib import Path\n",
18
+ "\n",
19
+ "\n",
20
+ "from prelib import data_dir\n",
21
+ "%aimport e6db.utils\n",
22
+ "from e6db.utils import TagSetNormalizer, tag_categories, tag_category2id"
23
+ ]
24
+ },
25
+ {
26
+ "cell_type": "markdown",
27
+ "id": "e388984c-819f-4aac-8fcf-22c70d266b91",
28
+ "metadata": {},
29
+ "source": [
30
+ "# Create the mappings\n",
31
+ "\n",
32
+ "By default, the tag strings in this dataset are using the raw e621 format, with underscores. `TagNormalizer` enables renaming the tags and adding new spelling as aliases.\n",
33
+ "\n",
34
+ "The tag formatting choices made here are specific to [this dataset](https://huggingface.co/datasets/k4d3/furry) meant for PDXL training with sd-scripts. You should adapt it to your own dataset, model and trainer."
35
+ ]
36
+ },
37
+ {
38
+ "cell_type": "code",
39
+ "execution_count": 2,
40
+ "id": "5d43e7c4-2216-4928-bb24-347fb287c07d",
41
+ "metadata": {},
42
+ "outputs": [],
43
+ "source": [
44
+ "cat_artist = e6db.utils.tag_category2id['artist']\n",
45
+ "cat_lore = e6db.utils.tag_category2id['lore']\n",
46
+ "\n",
47
+ "tagset_normalizer = e6db.utils.TagSetNormalizer(data_dir)\n",
48
+ "tagid2cat = tagset_normalizer.tag_normalizer.tag_categories\n",
49
+ "\n",
50
+ "@cache\n",
51
+ "def tag_mapfun(tag_underscores, tid):\n",
52
+ " \"\"\"\n",
53
+ " Maps raw e621 tags to more natural forms.\n",
54
+ " Will be run on:\n",
55
+ "\n",
56
+ " * The list of output tag strings,\n",
57
+ " * Keys from the dictionary mapping strings to ids, contains canonical tag and aliases,\n",
58
+ " * Implication source tags that are not used frequently enough to get an id.\n",
59
+ "\n",
60
+ " Returns a list, where the first string is the canonical tag used in the output,\n",
61
+ " the others are additional aliases used for recognizing the tag.\n",
62
+ " \"\"\"\n",
63
+ " cat = tagid2cat[tid] if tid is not None else -1\n",
64
+ " tag = tag_underscores.replace('_', ' ')\n",
65
+ " tags = [tag, tag_underscores]\n",
66
+ " if cat == cat_artist:\n",
67
+ " if not tag.startswith('by '):\n",
68
+ " # 'by ' is used in the output tags\n",
69
+ " tags.insert(0, f'by {tag.removesuffix(' (artist)')}')\n",
70
+ " if not tag.endswith('(artist)'):\n",
71
+ " artist = tag.removeprefix('by ')\n",
72
+ " tags.append(f'{artist} (artist)')\n",
73
+ " elif cat == cat_lore and not tag.endswith(' (lore)'):\n",
74
+ " tags.append(f'{tag} (lore)')\n",
75
+ "\n",
76
+ " escaped_parens = [t.replace('(', r'\\(').replace(')', r'\\)') for t in tags]\n",
77
+ " for t, ep in zip(tags[1:], escaped_parens[1:]):\n",
78
+ " if t != ep:\n",
79
+ " tags.append(ep)\n",
80
+ " if escaped_parens[0] != tags[0]:\n",
81
+ " tags.insert(0, escaped_parens[0]) # apparently sd-scripts require escaped parentheses \n",
82
+ " if ':' in tag: # Recognize tags where ':' were replaced by a space\n",
83
+ " tags.append(tag.replace(':', ' '))\n",
84
+ " return tags\n",
85
+ "\n",
86
+ "\n",
87
+ "# Note: uses a single function to map both tag recognition and tags in the output, but this will change\n",
88
+ "tagset_normalizer = tagset_normalizer.map_tags(tag_mapfun, on_conflict=\"overwrite_rarest\") \n",
89
+ "# on_conflict: use warn to debug conflicts. silent, overwrite, overwrite_rarest, warn, raise\n",
90
+ "del tag_mapfun # drop the cache\n",
91
+ "\n",
92
+ "# Add some underscores back\n",
93
+ "tag_normalizer = tagset_normalizer.tag_normalizer\n",
94
+ "tag_normalizer.rename_output('rating explicit', 'rating_explicit')\n",
95
+ "tag_normalizer.rename_output('rating questionable', 'rating_questionable')\n",
96
+ "tag_normalizer.rename_output('rating safe', 'rating_safe')\n",
97
+ "# Custom mappings\n",
98
+ "tag_normalizer.add_input_mappings('explicit', 'rating_explicit')\n",
99
+ "tag_normalizer.add_input_mappings('score_explicit', 'rating_explicit')\n",
100
+ "tag_normalizer.add_input_mappings('safe', 'rating_safe', on_conflict='overwrite')\n",
101
+ "tag_normalizer.add_input_mappings('score_safe', 'rating_safe')\n",
102
+ "tag_normalizer.add_input_mappings('questionable', 'rating_questionable', on_conflict='overwrite')\n",
103
+ "tag_normalizer.add_input_mappings('score_questionable', 'rating_questionable')"
104
+ ]
105
+ },
106
+ {
107
+ "cell_type": "code",
108
+ "execution_count": 3,
109
+ "id": "8ef27e6f-8bbf-4481-b5e9-7d2b9cd8dd99",
110
+ "metadata": {},
111
+ "outputs": [
112
+ {
113
+ "data": {
114
+ "text/plain": [
115
+ "\u001b[0;31mSignature:\u001b[0m\n",
116
+ "\u001b[0mtagset_normalizer\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmap_tags\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\u001b[0m\n",
117
+ "\u001b[0;34m\u001b[0m \u001b[0mmapfun\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mCallable\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mstr\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mint\u001b[0m \u001b[0;34m|\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mstr\u001b[0m \u001b[0;34m|\u001b[0m \u001b[0mlist\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mstr\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\n",
118
+ "\u001b[0;34m\u001b[0m \u001b[0mmap_input\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mTrue\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\n",
119
+ "\u001b[0;34m\u001b[0m \u001b[0mmap_output\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mTrue\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\n",
120
+ "\u001b[0;34m\u001b[0m \u001b[0mon_conflict\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m'raise'\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\n",
121
+ "\u001b[0;34m\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m->\u001b[0m \u001b[0;34m'TagSetNormalizer'\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
122
+ "\u001b[0;31mDocstring:\u001b[0m\n",
123
+ "Apply a function to all tag strings.\n",
124
+ "\n",
125
+ "The provided function will be run on:\n",
126
+ "\n",
127
+ "* The list of output tag strings,\n",
128
+ "* Keys from the dictionary mapping strings to ids, contains canonical\n",
129
+ " tag and aliases,\n",
130
+ "* Implication source tags that are not used frequently enough to get an\n",
131
+ " id assigned (less than twice).\n",
132
+ "\n",
133
+ "The function should return a list, where the first string is the\n",
134
+ "canonical tag used in the output, the others are additional aliases\n",
135
+ "used for recognizing the tag.\n",
136
+ "\u001b[0;31mFile:\u001b[0m ~/code/e6db/e6db/utils/__init__.py\n",
137
+ "\u001b[0;31mType:\u001b[0m method"
138
+ ]
139
+ },
140
+ "metadata": {},
141
+ "output_type": "display_data"
142
+ }
143
+ ],
144
+ "source": [
145
+ "tagset_normalizer.map_tags?"
146
+ ]
147
+ },
148
+ {
149
+ "cell_type": "markdown",
150
+ "id": "554e0048-761a-494b-9573-4fb60d55ff98",
151
+ "metadata": {},
152
+ "source": [
153
+ "# Edit caption files\n",
154
+ "\n",
155
+ "Loads, normalize and remove implied tags from a files in a sd-script hierarchy of directories.\n",
156
+ "\n",
157
+ "Unknown tags are kept untouched.\n",
158
+ "\n",
159
+ "Specific to kade's format, captions are detected as tags ending with a period and are moved to the end of the file."
160
+ ]
161
+ },
162
+ {
163
+ "cell_type": "code",
164
+ "execution_count": 4,
165
+ "id": "497c2b51-4429-4d1b-93ef-b9c79193698f",
166
+ "metadata": {},
167
+ "outputs": [
168
+ {
169
+ "name": "stdout",
170
+ "output_type": "stream",
171
+ "text": [
172
+ "tags=[6, 5, 17, 'unknown tag'] implied={0, 15}\n"
173
+ ]
174
+ },
175
+ {
176
+ "data": {
177
+ "text/plain": [
178
+ "\u001b[0;31mSignature:\u001b[0m \u001b[0mtagset_normalizer\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mencode\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtags\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mIterable\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mstr\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mkeep_implied\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mFalse\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
179
+ "\u001b[0;31mDocstring:\u001b[0m\n",
180
+ "Encode a list of string as numerical ids and strip implied tags.\n",
181
+ "\n",
182
+ "Unknown tags are returned as strings.\n",
183
+ "\n",
184
+ "Returns :\n",
185
+ "\n",
186
+ "* a list of tag ids and unknown tag strings,\n",
187
+ "* a list of implied tag ids.\n",
188
+ "\u001b[0;31mFile:\u001b[0m ~/code/e6db/e6db/utils/__init__.py\n",
189
+ "\u001b[0;31mType:\u001b[0m method"
190
+ ]
191
+ },
192
+ "metadata": {},
193
+ "output_type": "display_data"
194
+ }
195
+ ],
196
+ "source": [
197
+ "# We will use tagset_normalizer.encode()/decode() to normalize tags. Demo:\n",
198
+ "tags, implied = tagset_normalizer.encode(['solo', 'male', 'canine', 'mammal', 'unknown tag'])\n",
199
+ "print(f'{tags=} {implied=}')\n",
200
+ "\n",
201
+ "tagset_normalizer.decode(tags)\n",
202
+ "\n",
203
+ "tagset_normalizer.encode?"
204
+ ]
205
+ },
206
+ {
207
+ "cell_type": "code",
208
+ "execution_count": 5,
209
+ "id": "5200e13d-983c-463c-aa8e-33429515d63e",
210
+ "metadata": {},
211
+ "outputs": [],
212
+ "source": [
213
+ "RE_SEP = re.compile(r'[,\\n]') # Split on commas and newline\n",
214
+ "\n",
215
+ "dataset_root = Path('~/repos/kade/furry').expanduser()\n",
216
+ "output_dir = Path('/tmp/furry_fix')\n",
217
+ "#output_dir = dataset_root # clobber mode\n",
218
+ "\n",
219
+ "def load_caption(fp):\n",
220
+ " tags, captions = [], []\n",
221
+ " with open(fp, 'rt') as fd:\n",
222
+ " for chunk in RE_SEP.split(fd.read()):\n",
223
+ " chunk = chunk.strip()\n",
224
+ " if not chunk:\n",
225
+ " continue\n",
226
+ " if chunk.endswith('.'):\n",
227
+ " captions.append(chunk)\n",
228
+ " else:\n",
229
+ " tags.append(chunk)\n",
230
+ " return tags, captions\n",
231
+ "\n",
232
+ "\n",
233
+ "counter = Counter()\n",
234
+ "for file in chain(dataset_root.glob('**/*.txt'), dataset_root.glob('**/*.cap*')):\n",
235
+ " if 'sample-prompts' in file.name:\n",
236
+ " continue\n",
237
+ " tags, captions = load_caption(file)\n",
238
+ " orig_tags = tags\n",
239
+ " tags, implied = tagset_normalizer.encode(tags)\n",
240
+ " counter.update(tags)\n",
241
+ " tags = tagset_normalizer.decode(tags)\n",
242
+ " if tags == orig_tags:\n",
243
+ " continue\n",
244
+ "\n",
245
+ " output_file = output_dir / file.relative_to(dataset_root)\n",
246
+ " output_file.parent.mkdir(parents=True, exist_ok=True)\n",
247
+ " result = ', '.join(chain(tags, captions))\n",
248
+ " with open(output_file, 'wt') as fd:\n",
249
+ " fd.write(result)"
250
+ ]
251
+ },
252
+ {
253
+ "cell_type": "code",
254
+ "execution_count": 6,
255
+ "id": "d4741937-f1fd-4a1c-b3ed-9698e3d76a79",
256
+ "metadata": {},
257
+ "outputs": [
258
+ {
259
+ "name": "stdout",
260
+ "output_type": "stream",
261
+ "text": [
262
+ "solo count=10877 (e621:general)\n",
263
+ "rating_explicit count=9363 (e621:general)\n",
264
+ "anthro count=7274 (e621:general)\n",
265
+ "nude count=5894 (e621:general)\n",
266
+ "female count=4525 (e621:general)\n",
267
+ "hi res count=4378 (e621:meta)\n",
268
+ "digital media \\(artwork\\) count=4245 (e621:meta)\n",
269
+ "erection count=3952 (e621:general)\n",
270
+ "male count=3821 (e621:general)\n",
271
+ "looking at viewer count=3600 (e621:general)\n",
272
+ "duo count=3555 (e621:general)\n",
273
+ "balls count=3496 (e621:general)\n",
274
+ "tail count=3485 (e621:general)\n",
275
+ "nipples count=3474 (e621:general)\n",
276
+ "open mouth count=3264 (e621:general)\n",
277
+ "smile count=3089 (e621:general)\n",
278
+ "blush count=2956 (e621:general)\n",
279
+ "english text count=2939 (e621:meta)\n",
280
+ "rating_safe count=2764 (e621:general)\n",
281
+ "white fur count=2338 (e621:general)\n",
282
+ "furry count=2275 (e621:invalid)\n",
283
+ "blue eyes count=2271 (e621:general)\n",
284
+ "navel count=2252 (e621:general)\n",
285
+ "tongue out count=2160 (e621:general)\n",
286
+ "outside count=2069 (e621:general)\n",
287
+ "absurd res count=1986 (e621:meta)\n",
288
+ "standing count=1982 (e621:general)\n",
289
+ "muscular male count=1977 (e621:general)\n",
290
+ "breasts count=1957 (e621:general)\n",
291
+ "butt count=1947 (e621:general)\n",
292
+ "pussy count=1915 (e621:general)\n",
293
+ "simple background count=1910 (e621:general)\n",
294
+ "penis count=1866 (e621:general)\n",
295
+ "wolf count=1847 (e621:species)\n",
296
+ "brown fur count=1799 (e621:general)\n",
297
+ "claws count=1738 (e621:general)\n",
298
+ "abs count=1678 (e621:general)\n",
299
+ "animal ears count=1662 (e621:general)\n",
300
+ "male focus count=1653 (e621:general)\n",
301
+ "dialogue count=1602 (e621:general)\n",
302
+ "big breasts count=1557 (e621:general)\n",
303
+ "thick thighs count=1550 (e621:general)\n",
304
+ "anus count=1525 (e621:general)\n",
305
+ "furry male count=1511 (unknown)\n",
306
+ "eyes closed count=1486 (e621:general)\n",
307
+ "rating_questionable count=1480 (e621:general)\n",
308
+ "sitting count=1464 (e621:general)\n",
309
+ "pecs count=1452 (e621:general)\n",
310
+ "watersports count=1448 (e621:general)\n",
311
+ "horn count=1444 (e621:general)\n",
312
+ "uncensored count=1405 (e621:meta)\n",
313
+ "humanoid penis count=1392 (e621:general)\n",
314
+ "spread legs count=1343 (e621:general)\n",
315
+ "white background count=1335 (e621:general)\n",
316
+ "biped count=1334 (e621:general)\n",
317
+ "signature count=1322 (e621:meta)\n",
318
+ "fur count=1311 (e621:general)\n",
319
+ "long hair count=1228 (e621:general)\n",
320
+ "grey fur count=1223 (e621:general)\n",
321
+ "invalid tag count=1216 (e621:invalid)\n",
322
+ "detailed background count=1206 (e621:general)\n",
323
+ "teeth count=1178 (e621:general)\n",
324
+ "black fur count=1152 (e621:general)\n",
325
+ "urine stream count=1148 (e621:general)\n",
326
+ "clothed count=1137 (e621:general)\n",
327
+ "fangs count=1129 (e621:general)\n",
328
+ "male/female count=1101 (e621:general)\n",
329
+ "black nose count=1100 (e621:general)\n",
330
+ "fox count=1098 (e621:species)\n",
331
+ "peeing count=1082 (e621:general)\n",
332
+ "green eyes count=1058 (e621:general)\n",
333
+ "barefoot count=1040 (e621:general)\n",
334
+ "5 fingers count=1040 (e621:general)\n",
335
+ "toe claws count=981 (e621:general)\n",
336
+ "artist name count=980 (e621:meta)\n",
337
+ "looking back count=980 (e621:general)\n",
338
+ "monochrome count=980 (e621:meta)\n",
339
+ "paws count=971 (e621:general)\n",
340
+ "feral count=951 (e621:general)\n",
341
+ "inside count=948 (e621:general)\n",
342
+ "black hair count=940 (e621:general)\n",
343
+ "thighs count=922 (e621:invalid)\n",
344
+ "4 toes count=922 (e621:general)\n",
345
+ "brown hair count=920 (e621:general)\n",
346
+ "full-length portrait count=920 (e621:meta)\n",
347
+ "knot count=919 (e621:general)\n",
348
+ "big penis count=915 (e621:general)\n",
349
+ "red eyes count=913 (e621:general)\n",
350
+ "by kenket count=913 (e621:artist)\n",
351
+ "barazoku count=902 (e621:general)\n",
352
+ "on back count=900 (e621:general)\n",
353
+ "hair count=889 (e621:general)\n",
354
+ "orange fur count=881 (e621:general)\n",
355
+ "canine penis count=881 (e621:general)\n",
356
+ "gynomorph count=870 (e621:general)\n",
357
+ "by kenno arkkan count=868 (e621:artist)\n",
358
+ "male peeing count=865 (e621:general)\n",
359
+ "tongue count=852 (e621:general)\n",
360
+ "bottomless count=836 (e621:general)\n",
361
+ "interspecies count=833 (e621:general)\n"
362
+ ]
363
+ }
364
+ ],
365
+ "source": [
366
+ "# Shows tag frequencies in the dataset, not counting the occurrences of implied tags\n",
367
+ "for tag, count in counter.most_common(n=100):\n",
368
+ " if isinstance(tag, int):\n",
369
+ " tag_string = tagset_normalizer.tag_normalizer.decode(tag)\n",
370
+ " cat = tag_categories[tagset_normalizer.tag_normalizer.tag_categories[tag]]\n",
371
+ " print(f'{tag_string:<30} {count=} (e621:{cat})')\n",
372
+ " else:\n",
373
+ " print(f'{tag:<30} {count=} (unknown)')"
374
+ ]
375
+ },
376
+ {
377
+ "cell_type": "code",
378
+ "execution_count": 7,
379
+ "id": "2b9cf8a6-61e2-4806-99fb-f92403294f08",
380
+ "metadata": {},
381
+ "outputs": [
382
+ {
383
+ "name": "stdout",
384
+ "output_type": "stream",
385
+ "text": [
386
+ "furry male(1511), feet out of frame(306), furry sticker(306), furry with furry(290), animal focus(287), male pubic hair(251), furry with non-furry(243), wolf boy(243), gynomorph female(224), intersex intersex(140), male masturbation(125), gynomorph male(118), armpits(116), navel hair(107), by spaceengine(94), gynomorph gynomorph(91), male ambiguous(89), fox girl(87), tiger boy(87), photo \\(medium\\)(78), score explicit(63), clothed nude(60), greg rutkowski(54), five nights at freddy's security breach(53), blp(52), avery palmer(50), hamgas(48), bare pectorals(47), digimon \\(creature\\)(47), cum on boy(39), two-tone skin(39), non- breasts(35), animal hands(34), black male underwear(29), by jwst(29), leopard ears(28), non- balls(28), ground vehicle(27), cum on pectorals(25), white male underwear(25), female ambiguous(25), fox boy(24), by hubble(24), rabbit girl(22), andromorph male(22), painting \\(medium\\)(22), demon horns(19), herm male(19), webcomic character(19), lion boy(18), herm female(18), dharrel(18), fish boy(17), krystal(17), shower \\(place\\)(15), flame-tipped tail(15), tiger girl(14), genderswap \\(mtf\\)(14), sidepec(13), animal collar(13), heart-shaped chest(13), gynomorph herm(13), foot out of frame(12), animalization(12), In the picture(12), arrow \\(projectile\\)(11), naked shirt(11), colored tongue(11), herm herm(11), generation 9(11), planet pov(11), score safe(11), futa with female(10), futanari masturbation(10), ringtail(10), faux traditional media(10), ambiguous ambiguous(10), dock(9), dappled sunlight(8), scar on leg(8), fewer digits(8), webcomic(8), balls on floor(8), breasts squeezed together(7), goat girl(7), fishnets(7), pectoral cleavage(7), slingshot swimsuit(7), 6+boys(7), planted(7), white thighhighs(7), full-package futanari(7), red thighhighs(7), cowboy western(7), deviantart username(7), off on(7), regional form \\(\\)(7), score questionable(7), In a dimly lit room(7), In the background(7)\n"
387
+ ]
388
+ }
389
+ ],
390
+ "source": [
391
+ "# Top 100 of unknown tags:\n",
392
+ "print(', '.join([f'{k}({v})' for k,v in counter.most_common() if isinstance(k, str)][:100]))"
393
+ ]
394
+ }
395
+ ],
396
+ "metadata": {
397
+ "kernelspec": {
398
+ "display_name": "Python 3 (ipykernel)",
399
+ "language": "python",
400
+ "name": "python3"
401
+ },
402
+ "language_info": {
403
+ "codemirror_mode": {
404
+ "name": "ipython",
405
+ "version": 3
406
+ },
407
+ "file_extension": ".py",
408
+ "mimetype": "text/x-python",
409
+ "name": "python",
410
+ "nbconvert_exporter": "python",
411
+ "pygments_lexer": "ipython3",
412
+ "version": "3.12.3"
413
+ }
414
+ },
415
+ "nbformat": 4,
416
+ "nbformat_minor": 5
417
+ }