shuttie committed 8caeaa7 (1 parent: 7de88d1)

initial commit

README.md CHANGED
@@ -1,3 +1,73 @@
- ---
- license: apache-2.0
- ---
+ ---
+ language:
+ - en
+ license: apache-2.0
+ tags:
+ - text
+ pretty_name: Reddit DadJokes dataset
+ size_categories:
+ - "100K<n<1M"
+ task_categories:
+ - text-generation
+ dataset_info:
+   config_name: default
+   features:
+   - name: input
+     dtype: string
+   - name: output
+     dtype: string
+   - name: instruction
+     dtype: string
+   splits:
+   - name: train
+     num_bytes: 2734101179
+     num_examples: 147753
+ train-eval-index:
+ - config: default
+   task: text-generation
+   splits:
+     train_split: train
+ configs:
+ - config_name: default
+   data_files:
+   - split: train
+     path: "data/train/*"
+ ---
+ 
+ # Reddit /r/DadJokes dataset
+ 
+ The dataset is based on a [semi-public Pushshift Reddit dataset](https://convokit.cornell.edu/documentation/subreddit.html).
+ 
+ ## Source data
+ 
+ The full extract of all top-level posts on `/r/DadJokes` can be found in the `src/reddit_dadjokes.csv.gz` file. The raw post contents are quite noisy, so we performed a few cleanup steps:
+ 
+ * Used `Llama-3-8B-Instruct` to split each dad joke into `intro` and `punchline` parts. See the `src/parse.py` script for details.
+ * Removed posts that were too short or too long, along with punchlines consisting only of emojis or URLs.
+ * Deduplicated the many duplicate submissions semantically with the `intfloat/e5-base-v2` embedding model (see `src/convert.py`, and the sketch below).
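+ 
+ A minimal sketch of the deduplication idea, using the same `e5` model and `passage:` prefix as `src/convert.py` (the example jokes here are illustrative; the actual pipeline marks neighbours with a cosine distance below 0.07 as duplicates):
+ 
+ ```python
+ from sentence_transformers import SentenceTransformer, util
+ 
+ model = SentenceTransformer("intfloat/e5-base-v2")
+ # e5 models expect a "passage: " prefix on document text
+ a = model.encode("passage: A grizzly kept talking to me He was unbearable")
+ b = model.encode("passage: A grizzly bear kept talking to me. He was unbearable!")
+ # near-duplicates score close to 1.0; convert.py keeps only the first copy
+ print(util.cos_sim(a, b))
+ ```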
+ 
+ ## Stats
+ 
+ 147,753 jokes split into `intro` and `punchline` parts, covering submissions from 2014 through 2022.
+ 
+ ## Examples
+ 
+ To stay Alpaca-format compatible, we use the `Continue a dad joke:` instruction by default, but you are free to substitute your own.
+ 
+ ```json
+ {"input": "A grizzly kept talking to me", "output": "He was unbearable", "instruction": "Continue a dad joke:"}
+ {"input": "I rubbed mayonnaise on my eyes", "output": "Oh fuck oh shit it hurts please help this is no joke it pains fuck help me already", "instruction": "Continue a dad joke:"}
+ {"input": "What do you say to encourage an asteroid?", "output": "Go little rockstar.", "instruction": "Continue a dad joke:"}
+ {"input": "They always ask me why my mood is always negative", "output": "Just multiply by -1 \ud83d\ude09", "instruction": "Continue a dad joke:"}
+ {"input": "Recently I started working with horses", "output": "It's a stable job.", "instruction": "Continue a dad joke:"}
+ {"input": "My favorite word is \"drool\"", "output": "Just rolls off the tongue.", "instruction": "Continue a dad joke:"}
+ {"input": "Where\u2019s a dogs favorite place to eat", "output": "At Woofle House", "instruction": "Continue a dad joke:"}
+ {"input": "Why don't oysters share their pearls?", "output": "They're shellfish \ud83d\udc1a\ud83d\ude02!", "instruction": "Continue a dad joke:"}
+ {"input": "What do you call two guys hanging out above a window?", "output": "Kurt & Rod", "instruction": "Continue a dad joke:"}
+ {"input": "Me: I'm not saying a word without my lawyer present!", "output": "So where's my present?!", "instruction": "Continue a dad joke:"}
+ ```
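+ 
+ A minimal sketch of loading the data with the `datasets` library and rendering one record as an Alpaca-style prompt. Note that the repo id below is an assumption; substitute the actual Hub path:
+ 
+ ```python
+ from datasets import load_dataset
+ 
+ # "shuttie/dadjokes" is a guess at the Hub repo id; adjust as needed
+ jokes = load_dataset("shuttie/dadjokes", split="train")
+ 
+ # render one record in the standard Alpaca prompt layout
+ row = jokes[0]
+ prompt = f"### Instruction:\n{row['instruction']}\n\n### Input:\n{row['input']}\n\n### Response:\n{row['output']}"
+ print(prompt)
+ ```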
+ 
+ ## License
+ 
+ Apache 2.0
data/train/train.jsonl.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f650a8375a414240f7c76c2adf276561bc9e17ad3aab02f720796242b440678c
+ size 6901495
src/convert.py ADDED
@@ -0,0 +1,62 @@
+ from argparse import ArgumentParser
+ import json
+ from tqdm import tqdm
+ from dataclasses import dataclass
+ from typing import List, Dict, Set
+ from sentence_transformers import SentenceTransformer
+ import hnswlib
+ 
+ # Semantic dedup + alpaca conversion step.
+ # usage (file names are examples):
+ #   python convert.py --data parsed.jsonl --out train.jsonl --inst "Continue a dad joke:"
+ 
+ 
+ @dataclass
+ class Doc:
+     input: str
+     output: str
+ 
+     @staticmethod
+     def from_json(doc: Dict):
+         return Doc(input=doc["input"], output=doc["output"])
+ 
+ 
+ if __name__ == "__main__":
+     parser = ArgumentParser(prog="convert.py", description="dadjokes semantic dedup and alpaca conversion")
+     parser.add_argument("--data", action="store", help="path to input JSONL file", required=True)
+     parser.add_argument("--out", action="store", help="path to output file", required=True)
+     parser.add_argument("--inst", action="store", help="alpaca instruction", required=True)
+ 
+     args = parser.parse_args()
+     print(args)
+ 
+     model = SentenceTransformer("intfloat/e5-base-v2", device="cuda")
+ 
+     with open(args.data, "r") as infile:
+         docs: List[Doc] = [Doc.from_json(json.loads(line)) for line in tqdm(infile.readlines())]
+     # e5 models expect a "passage: " prefix on document text
+     embeddings = model.encode([f"passage: {doc.input} {doc.output}" for doc in docs], batch_size=512, show_progress_bar=True)
+     p = hnswlib.Index(space="cosine", dim=768)
+     print("building index")
+     p.init_index(max_elements=len(docs), ef_construction=200, M=16)
+     p.add_items(embeddings, list(range(len(docs))))
+     print("computing similarity")
+     labels, distances = p.knn_query(embeddings, k=10)
+     skips: Set[int] = set()
+     print("search done, exporting")
+     dupe_count = 0
+     broken_count = 0
+     with open(args.out, "w") as output:
+         for (index, doc), label_list, dist_list in zip(enumerate(docs), labels.tolist(), distances.tolist()):
+             if index not in skips:
+                 # punchlines containing URLs are considered broken and dropped
+                 if "http" not in doc.output:
+                     jdoc = {"input": doc.input, "output": doc.output, "instruction": args.inst}
+                     output.write(json.dumps(jdoc) + "\n")
+                 else:
+                     broken_count += 1
+             else:
+                 dupe_count += 1
+             skips.add(index)
+             # mark near-duplicate neighbours (cosine distance < 0.07)
+             # so only the first occurrence is written out
+             for label, dist in zip(label_list, dist_list):
+                 if dist < 0.07:
+                     skips.add(label)
+ 
+     print(f"done: dupes={dupe_count} broken={broken_count}")
src/parse.py ADDED
@@ -0,0 +1,85 @@
+ from transformers import (
+     pipeline,
+ )
+ import torch
+ import json
+ from transformers.pipelines.pt_utils import KeyDataset
+ from tqdm import tqdm
+ from datasets import Dataset
+ from argparse import ArgumentParser
+ from typing import Dict, List
+ 
+ # Reddit CSV dump parser script: splits each joke into intro and punchline
+ # by asking an instruction-tuned LLM to insert a "|" separator.
+ # usage (file names are examples):
+ #   python parse.py --data reddit_dadjokes.csv --out parsed.jsonl
+ 
+ 
+ def make_prompt_mistral(data: Dict[str, List]) -> Dict[str, List]:
+     prompt_template = "### Instruction:\n{instruct}:\n\n### Input:\n{input}\n\n### Response:\n"
+     result = []
+     for doc in data["joke"]:
+         prompt = prompt_template.format(
+             instruct="For the following joke, add a | separator between the intro part and the punchline. "
+             "Do not change the sentence, only add a separator. "
+             "A full sentence should be considered a punchline. "
+             "A question is a full intro section, everything following a question must be considered a punchline. "
+             "Do not repeat the punchline, do not change words in the sentence.",
+             input=doc,
+         )
+         result.append(prompt)
+     return {"prompt": result}
+ 
+ 
+ def make_prompt_llama3(data: Dict[str, List]) -> Dict[str, List]:
+     prompt_template = "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n{instruct}<|eot_id|>\n<|start_header_id|>user<|end_header_id|>\n\n{input}<|eot_id|>\n<|start_header_id|>assistant<|end_header_id|>\n\n"
+     result = []
+     for doc in data["joke"]:
+         prompt = prompt_template.format(
+             instruct="For the following joke, add a | separator between the intro part and the punchline. "
+             "Do not change the sentence, only add a separator. "
+             "A full sentence should be considered a punchline. "
+             "A question is a full intro section, everything following a question must be considered a punchline. "
+             "Do not repeat the punchline, do not change words in the sentence. "
+             "Do not repeat this instruction, only output the result. "
+             "Do not say you are an assistant.",
+             input=doc[:256],  # truncate very long posts to keep prompts short
+         )
+         result.append(prompt)
+     return {"prompt": result}
+ 
+ 
+ if __name__ == "__main__":
+     parser = ArgumentParser(prog="batch_split", description="dadjokes intro/punchline splitter")
+     parser.add_argument("--data", action="store", help="path to reddit.csv", required=True)
+     parser.add_argument("--out", action="store", help="path to out file", required=True)
+     args = parser.parse_args()
+     print(args)
+ 
+     dataset = Dataset.from_csv(args.data, split="train")
+     generator = pipeline(
+         task="text-generation",
+         # model="mistralai/Mistral-7B-Instruct-v0.3",
+         model="meta-llama/Meta-Llama-3-8B-Instruct",
+         torch_dtype=torch.bfloat16,
+         device_map="auto",
+     )
+     # 128009 is the Llama 3 <|eot_id|> token, reused here as the pad token
+     generator.tokenizer.pad_token_id = 128009
+ 
+     prompts = KeyDataset(dataset.map(function=make_prompt_llama3, batched=True), "prompt")
+     with open(args.out, "w") as f:
+         for result in tqdm(
+             generator(
+                 prompts,
+                 return_full_text=False,
+                 max_new_tokens=128,
+                 num_return_sequences=1,
+                 batch_size=24,
+             ),
+             total=len(prompts),
+         ):
+             # keep only the last line of the generation in case the model
+             # echoes parts of the instruction
+             raw_text = result[0]["generated_text"].split("\n")
+             joke = raw_text[-1]
+             if "|" in joke:
+                 tokens = joke.split("|")
+                 if len(tokens) == 2:
+                     intro = tokens[0].strip()
+                     punch = tokens[1].strip()
+                     # drop degenerate splits with very short intros/punchlines
+                     if len(intro) > 10 and len(punch) > 5:
+                         f.write(json.dumps({"input": intro, "output": punch}) + "\n")
src/reddit_dadjokes.csv.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fb55e62dc7077144a5b82165109b8770f94da60eee701045f39c7a7a28c7083a
+ size 19162024
src/requirements.txt ADDED
@@ -0,0 +1,6 @@
+ transformers
+ tqdm
+ datasets
+ torch
+ sentence_transformers
+ hnswlib