shuttie committed on
Commit
d78207e
0 Parent(s):

initial commit

Browse files
.gitattributes ADDED
@@ -0,0 +1 @@
 
 
1
+ *.zst filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ .venv
2
+ .mypy_cache
README.md ADDED
@@ -0,0 +1,84 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ language:
3
+ - en
4
+ license: apache-2.0
5
+ tags:
6
+ - text
7
+ pretty_name: Amazon ESCI dataset in nixietune format
8
+ size_categories:
9
+ - "100K<n<1M"
10
+ source_datasets:
11
+ - Amazon ESCI
12
+ task_categories:
13
+ - sentence-similarity
14
+ dataset_info:
15
+ config_name: default
16
+ features:
17
+ - name: query
18
+ dtype: string
19
+ - name: pos
20
+ sequence: string
21
+ - name: neg
22
+ sequence: string
23
+ - name: negscore
24
+ sequence: float64
25
+ splits:
26
+ - name: train
27
+ num_bytes: 361711993
28
+ num_examples: 74589
29
+ - name: test
30
+ num_bytes: 109820429
31
+ num_examples: 22398
32
+ - name: test_1k
33
+ num_bytes: 109820429
34
+ num_examples: 22398
35
+ train-eval-index:
36
+ - config: default
37
+ task: sentence-similarity
38
+ splits:
39
+ train_split: train
40
+ eval_split: test
41
+ configs:
42
+ - config_name: default
43
+ data_files:
44
+ - split: train
45
+ path: "data/train/*"
46
+ - split: test
47
+ path: "data/test/*"
48
+ - split: test_1k
49
+ path: "data/test_1k/*"
50
+ ---
51
+
52
+ # Amazon ESCI dataset
53
+
54
+ A dataset in a [nixietune](https://github.com/nixiesearch/nixietune) compatible format:
55
+
56
+ ```json
57
+ {
+   "query": "# cellist thats not a hashtag",
+   "pos": "Funny Cellists That's Not A Hashtag Music Sweatshirt",
+   "neg": [
+     "Feelin Good Tees My Opinion Offended You Adult Humor T Shirt XL Black",
+     "Christian Faith & Cross T-Shirt - Christian Faith T Shirts T-Shirt",
+     "PLUS PLUS - 240 Piece Basic Mix - Construction Building Stem/Steam Toy, Mini Puzzle Blocks for Kids",
+     "Caution I Learned to Drive Through Video Games - Funny Gamer T-Shirt",
+     "People Who Tolerate Me On A Daily Basis T Shirt L Black"
+   ]
+ }
69
+ ```
70
+
71
+ This is the expanded version of the [Amazon ESCI small-en](https://github.com/amazon-science/esci-data) dataset:
72
+ * can be loaded with [HF datasets](https://huggingface.co/docs/datasets/index) directly.
73
+
74
+ ## Usage
75
+
76
+ ```python
77
+ from datasets import load_dataset
78
+
79
+ data = load_dataset('nixiesearch/amazon-esci', split="train")
80
+ ```
81
+
82
+ ## License
83
+
84
+ Apache 2.0
convert.py ADDED
@@ -0,0 +1,68 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import polars as pl
import sys
import json
from tqdm import tqdm

# ESCI relevance grades mapped onto graded relevance scores:
# Exact, Substitute, Complement, Irrelevant.
labelmap = {"E": 1.0, "S": 0.1, "C": 0.01, "I": 0.0}

# CLI: convert.py <products.parquet> <examples.parquet> <split-name>
split = sys.argv[3]

# Flatten all descriptive product fields into a single "text" column,
# skipping nulls so missing fields don't inject the string "null".
text_expr = pl.concat_str(
    [
        pl.col("product_title"),
        pl.col("product_description"),
        pl.col("product_bullet_point"),
        pl.col("product_brand"),
        pl.col("product_color"),
    ],
    separator=" ",
    ignore_nulls=True,
).alias("text")

# US-locale product catalog reduced to (product_id, text).
products = (
    pl.read_parquet(sys.argv[1])
    .filter(pl.col("product_locale") == "us")
    .with_columns(text_expr)
    .select(pl.col("product_id", "text"))
)

# Keep only the small-version US examples for the requested split.
examples_filter = (
    (pl.col("product_locale") == "us")
    & (pl.col("small_version") == 1)
    & (pl.col("split") == split)
)

# Judgments with their ESCI labels converted to numeric scores.
examples = (
    pl.read_parquet(sys.argv[2])
    .filter(examples_filter)
    .with_columns(
        pl.col("esci_label").replace(labelmap).alias("score").cast(pl.Float64)
    )
    .select(pl.col("query_id", "query", "product_id", "score"))
)

# Attach product text to each judgment; left join keeps judgments whose
# product id is missing from the catalog (text becomes null).
merged = examples.join(products, on="product_id", how="left")

print(merged)

# One row per query: the query string plus parallel lists of document
# texts and their scores.
result = merged.group_by("query_id").agg(
    pl.first("query"), pl.col("text"), pl.col("score")
)
47
+
48
+ def save_json(df: pl.DataFrame, path: str):
49
+ with open(path, "w") as f:
50
+ for row in tqdm(result.to_dicts(), desc=f"saving {path}"):
51
+ query = row["query"]
52
+ pos = []
53
+ neg = []
54
+ negscore = []
55
+ for doc, score in zip(row["text"], row["score"]):
56
+ if score == 1.0:
57
+ pos.append(doc)
58
+ else:
59
+ neg.append(doc)
60
+ negscore.append(score)
61
+ for p in pos:
62
+ line = json.dumps(
63
+ {"query": query, "pos": p, "neg": neg, "negscore": negscore}
64
+ )
65
+ f.write(line + "\n")
66
+
67
+
68
+ save_json(result, f"{split}.jsonl")
data/test/test.jsonl.zst ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bdf6619caa009db3941e62cd84da9cd43081a5844ea2c8f32de8a765b958d93e
3
+ size 73921476
data/test_1k/test.json.zst ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b1e7ba00e480d7b467efa4ab2d0eb6c80dd4f4148135891bbc2981ab32ca7b02
3
+ size 949594
data/train/train.jsonl.zst ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e1d94c8b841f4e33a71ba9c0be527e651a16e8f94c02ad1fe60913ba3dfebe97
3
+ size 172057396
requirements.txt ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ polars
2
+ tqdm