tunji17 committed
Commit 213bf26
1 Parent(s): a60711d

adds jsonl dataset

Files changed (2)
  1. fakenews_data.jsonl +0 -0
  2. process_data.py +41 -0
fakenews_data.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
process_data.py ADDED
@@ -0,0 +1,41 @@
+ from datasets import load_dataset
+
+ dataset_name = "2nji/makebelieve-480"
+
+ # Load the dataset
+ dataset = load_dataset(dataset_name)
+
+ # Shuffle the dataset and keep 480 examples
+ dataset = dataset['train'].shuffle(seed=42).select(range(480))
+
+
+ # Define a function to transform the data
+ def transform_conversation(example):
+     conversation_text = example['text']
+     segments = conversation_text.split('###')
+
+     reformatted_segments = []
+
+     # Iterate over the segments in Human/Assistant pairs
+     for i in range(1, len(segments), 2):
+         human_text = segments[i].strip().replace('Human:', '').strip()
+
+         # Check if there is a corresponding assistant segment before processing
+         if i + 1 < len(segments):
+             assistant_text = segments[i + 1].strip().replace('Assistant:', '').strip()
+
+             # Apply the new template
+             reformatted_segments.append(f'<s>[INST] {human_text} [/INST] {assistant_text} </s>')
+         else:
+             # Handle the case where there is no corresponding assistant segment
+             reformatted_segments.append(f'<s>[INST] {human_text} [/INST] </s>')
+
+     return {'text': ''.join(reformatted_segments)}
+
+
+ # Apply the transformation
+ transformed_dataset = dataset.map(transform_conversation)
+
+ # Upload the dataset to the Hub
+ # Don't forget to replace the token with your own
+ transformed_dataset.push_to_hub(dataset_name, token="...")
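
For illustration (not part of the committed script), here is a minimal sketch of what transform_conversation produces for a single record. The sample conversation is an assumed example of the dataset's "### Human: ... ### Assistant: ..." format, not an actual row from 2nji/makebelieve-480:

# Hypothetical record in the assumed source format (not real data)
sample = {'text': "### Human: Is this headline real? ### Assistant: No, it appears to be fabricated."}

result = transform_conversation(sample)
print(result['text'])
# -> <s>[INST] Is this headline real? [/INST] No, it appears to be fabricated. </s>

The <s>[INST] ... [/INST] ... </s> wrapper is the Llama 2 chat prompt template, which is presumably why the script reformats the conversations this way before pushing them to the Hub.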