pietrolesci committed on
Commit
94e0f09
·
verified ·
1 Parent(s): f2b9521

Create processing_script.py

Browse files
Files changed (1) hide show
  1. processing_script.py +110 -0
processing_script.py ADDED
@@ -0,0 +1,110 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
def download_civilcomments(data_path: str | Path) -> None:
    """Fetch the CivilComments (WILDS) archive into ``data_path``/civilcomments.

    Creates the target directory if needed and delegates the actual download
    and extraction to ``download_and_extract``.
    """
    print("Downloading CivilComments...")
    target_dir = Path(data_path) / "civilcomments"
    target_dir.mkdir(parents=True, exist_ok=True)
    archive_path = target_dir / "civilcomments.tar.gz"
    bundle_url = "https://worksheets.codalab.org/rest/bundles/0x8cd3de0634154aeaad2ee6eb96723c6e/contents/blob/"
    download_and_extract(bundle_url, str(archive_path))
def process_civilcomments(data_path: str | Path, dst: str | Path, keep_raw: bool = False) -> None:
    """Clean, deduplicate, and publish the CivilComments-WILDS dataset.

    Reads ``all_data_with_identities.csv`` from ``data_path``, drops rows with a
    missing text/label or a negative label, binarises toxicity/identity columns
    at the 0.5 threshold, deduplicates comments, then saves the result under
    ``dst`` and pushes it to the Hugging Face Hub.

    Args:
        data_path: Directory that contains the downloaded ``civilcomments`` data.
        dst: Root output directory for the processed dataset.
        keep_raw: If True, also save and push the unprocessed dataframe.
    """
    print("Processing CivilComments...")
    df = pd.read_csv(Path(data_path) / "civilcomments/all_data_with_identities.csv", index_col=0)
    if keep_raw:
        ds = Dataset.from_pandas(df, preserve_index=False)

        # save locally
        ds.save_to_disk(str(Path(dst) / "civilcomments-wilds" / "raw"))

        # push to hub
        ds.push_to_hub("<repo name>", "raw")

    # extract labels, features, and metadata
    input_output_vars = ["id", "split", "comment_text", "toxicity"]
    identity_vars = ["male", "female", "LGBTQ", "christian", "muslim", "other_religions", "black", "white"]
    auxiliary_vars = [
        "identity_any",
        "severe_toxicity",
        "obscene",
        "threat",
        "insult",
        "identity_attack",
        "sexual_explicit",
    ]

    # remove instances where label ("toxicity") or text ("comment_text") is missing
    df = df.loc[df[input_output_vars[-2:]].isna().sum(1) == 0]

    # remove instances where label < 0
    df = df.loc[df[input_output_vars[-1]] >= 0]

    # keep only columns we need
    df = df.loc[:, input_output_vars + identity_vars + auxiliary_vars]

    # encode label and identity attributes as binary indicators (threshold at 0.5)
    cols = [input_output_vars[-1]] + identity_vars + auxiliary_vars
    df[cols] = (df[cols] >= 0.5).astype(int)

    # fmt: off
    # deduplicate: for each repeated comment, count how many distinct values
    # the identity/split/label columns take across its occurrences
    gdf = df.groupby("comment_text")[identity_vars + ["split", "toxicity"]].agg("nunique")
    gdf["multiple"] = (gdf != 1).sum(1)

    print(f"""
There are {df["comment_text"].duplicated().sum()} exact duplicates (i.e., same `comment_text`).
Of these, only {len(gdf.query("multiple > 0"))} are unique `comment_text`.

Some duplicates appear with different attributes and labels, and some even in multiple splits.
In particular,

{(gdf[identity_vars + ["split", "toxicity"]] > 1).sum()}
""")

    # if duplicates it keeps:
    # - the occurrence in the validation set ("val" sorts after "test"/"train"), or
    # - the one with higher toxicity, or
    # - the one with higher identity_vars (in order they appear in the list)
    # - the one with higher auxiliary_vars (in order they appear in the list)
    print(f"Length before deduplication: {len(df)}")
    df = (
        df.sort_values(["comment_text", "split", "toxicity"] + identity_vars + auxiliary_vars, ascending=False)
        .drop_duplicates(subset="comment_text", keep="first")
    )
    print(f"Length after deduplication: {len(df)}")

    # add column listing the names of all active (value == 1) identity attributes
    df = (
        df.assign(active_attributes=lambda _df: _df[identity_vars].values.tolist())
        .assign(
            active_attributes=lambda _df: _df["active_attributes"].map(
                lambda lst: [name for idx, name in zip(lst, identity_vars, strict=True) if idx == 1]
            )
        )
    )
    # fmt: on

    # add column to flag whether any active attribute is present
    assert ((df[identity_vars].sum(1) != 0) == (df["active_attributes"].map(len) > 0)).all()  # simple check
    df["has_active_attrs"] = df[identity_vars].sum(1) != 0

    # add unique identifier as first column
    df["uid"] = list(range(len(df)))

    # reorder columns nicely
    # (removed dead assignment `cols = df.columns.tolist()` — its result was never used)
    df = df[["uid"] + input_output_vars + ["has_active_attrs", "active_attributes"] + identity_vars + auxiliary_vars]

    # convert to DatasetDict, one entry per split (renaming "val" -> "validation")
    ds_dict = {}
    for split in df["split"].unique().tolist():
        ds = Dataset.from_pandas(df.query(f"split == '{split}'").drop(columns=["split"]), preserve_index=False)
        ds = ds.cast_column("toxicity", ClassLabel(num_classes=2, names=["non-toxic", "toxic"]))
        ds_dict[split if split != "val" else "validation"] = ds
    ds_dict = DatasetDict(ds_dict)

    # save locally
    ds_dict.save_to_disk(str(Path(dst) / "civilcomments-wilds" / "texts"))

    # push to hub
    ds_dict.push_to_hub("<repo name>", "default")