Jonathan Li committed
Commit a8ea5b9
Parent: 927a07e

Add new ordering

Files changed (4)
  1. data/dev.jsonl +2 -2
  2. data/train.jsonl +2 -2
  3. data_anon/train.jsonl +1 -1
  4. process.ipynb +17 -16
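The two public splits touched here can be loaded the same way the notebook reads them back in. A minimal sketch, assuming the repository has been cloned and `git lfs pull` has been run so that the paths below contain real JSON Lines data rather than LFS pointer text; the `violated` column name is taken from the notebook diff further down.

```python
import pandas as pd

# Load the line-delimited JSON splits updated by this commit.
# These paths hold Git LFS pointers until `git lfs pull` fetches the data.
train = pd.read_json("data/train.jsonl", lines=True)
dev = pd.read_json("data/dev.jsonl", lines=True)

# Quick sanity check on the boolean label the new ordering is based on.
print(len(train), len(dev))
print(dev["violated"].value_counts())
```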
data/dev.jsonl CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:83f05c94be1150625730f4ee9a6b901c8f61b766497a23e18fc36ae8099141fb
- size 21816062
+ oid sha256:b4f71bd11bb0a27021dcd88f2eca9f0400c42f435ec291f7895b4627b63b29f2
+ size 21816052
data/train.jsonl CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:f273707cb2ec891fb06a349a36cdbd9e1591208ea19de6146653d1052c89e1e5
- size 105354914
+ oid sha256:171f91a4c82b362878d06fecc5c8d7a15aa1009b0d84c4427753d8458b7d399e
+ size 105354910
data_anon/train.jsonl CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:209755f6dfee89849455b89918babee0e33618c40e65a99e9a8cbd6164394dbd
+ oid sha256:0b735134e63256e822ef0115735c59ccfaefe44488ffbfac8f25d80a3385073f
  size 98259925
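All three data files are stored through Git LFS, so the hunks above only change the pointer files: each gets a new sha256 object id, and data/dev.jsonl and data/train.jsonl also change size by a few bytes, while data_anon/train.jsonl keeps the same byte count, which is consistent with its rows merely being reordered. A hedged sketch of checking a fetched file against its pointer (the pointer text is copied from the new data/dev.jsonl above):

```python
import hashlib
from pathlib import Path

def matches_lfs_pointer(pointer_text: str, data_path: str) -> bool:
    """Check a local file against a Git LFS pointer (version/oid/size lines)."""
    fields = dict(line.split(" ", 1) for line in pointer_text.strip().splitlines())
    expected_oid = fields["oid"].removeprefix("sha256:")
    expected_size = int(fields["size"])

    data = Path(data_path).read_bytes()  # fine for files in the ~20-100 MB range
    return len(data) == expected_size and hashlib.sha256(data).hexdigest() == expected_oid

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:b4f71bd11bb0a27021dcd88f2eca9f0400c42f435ec291f7895b4627b63b29f2
size 21816052"""
print(matches_lfs_pointer(pointer, "data/dev.jsonl"))
```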
process.ipynb CHANGED
@@ -11,18 +11,18 @@
    },
    {
     "cell_type": "code",
-    "execution_count": 7,
+    "execution_count": 2,
     "metadata": {},
     "outputs": [],
     "source": [
-     "_dev = pd.read_json(\"EN_dev.jsonl\", lines=True)\n",
-     "_test = pd.read_json(\"EN_test.jsonl\", lines=True)\n",
-     "_train = pd.read_json(\"EN_train.jsonl\", lines=True)"
+     "_dev = pd.read_json(\"EN_dev_Anon.jsonl\", lines=True)\n",
+     "_test = pd.read_json(\"EN_test_Anon.jsonl\", lines=True)\n",
+     "_train = pd.read_json(\"EN_train_Anon.jsonl\", lines=True)"
     ]
    },
    {
     "cell_type": "code",
-    "execution_count": 8,
+    "execution_count": 3,
     "metadata": {},
     "outputs": [],
     "source": [
@@ -39,7 +39,7 @@
    },
    {
     "cell_type": "code",
-    "execution_count": 9,
+    "execution_count": 4,
     "metadata": {},
     "outputs": [],
     "source": [
@@ -49,7 +49,8 @@
     "d_remaining = {k: v[dist[k]:] for k, v in d.items()}\n",
     "\n",
     "new_rows = []\n",
-    "for label in dev[\"violated\"]:\n",
+    "for i in range(len(dev[\"violated\"])):\n",
+    " label = i % 2 == 0\n",
     " new_rows.append(train.iloc[d_train[label].pop()])\n",
     "\n",
     "new_train = pd.concat([pd.DataFrame(new_rows), pd.DataFrame(train.iloc[i] for l in d_remaining.values() for i in l).sample(frac=1, random_state=42)])"
@@ -57,7 +58,7 @@
    },
    {
     "cell_type": "code",
-    "execution_count": 10,
+    "execution_count": 5,
     "metadata": {},
     "outputs": [],
     "source": [
@@ -72,14 +73,14 @@
     "metadata": {},
     "outputs": [],
     "source": [
-     "train = pd.read_json(\"data_anon/train.jsonl\", lines=True, orient=\"records\")\n",
-     "test = pd.read_json(\"data_anon/test.jsonl\", lines=True, orient=\"records\")\n",
-     "dev = pd.read_json(\"data_anon/dev.jsonl\", lines=True, orient=\"records\")"
+     "train = pd.read_json(\"data/train.jsonl\", lines=True, orient=\"records\")\n",
+     "test = pd.read_json(\"data/test.jsonl\", lines=True, orient=\"records\")\n",
+     "dev = pd.read_json(\"data/dev.jsonl\", lines=True, orient=\"records\")"
     ]
    },
    {
     "cell_type": "code",
-    "execution_count": 14,
+    "execution_count": 4,
     "metadata": {},
     "outputs": [],
     "source": [
@@ -93,13 +94,13 @@
    },
    {
     "cell_type": "code",
-    "execution_count": null,
+    "execution_count": 5,
     "metadata": {},
     "outputs": [],
     "source": [
-     "train.to_json(\"train.jsonl\", lines=True, orient=\"records\")\n",
-     "test.to_json(\"test.jsonl\", lines=True, orient=\"records\")\n",
-     "dev.to_json(\"dev.jsonl\", lines=True, orient=\"records\")"
+     "train.to_json(\"data/train.jsonl\", lines=True, orient=\"records\")\n",
+     "test.to_json(\"data/test.jsonl\", lines=True, orient=\"records\")\n",
+     "dev.to_json(\"data/dev.jsonl\", lines=True, orient=\"records\")"
     ]
    }
   ],
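The substantive change behind the commit message is the loop in the third code cell. The old version copied the dev split's sequence of `violated` labels onto the head of the training data; the new version alternates the label for the first `len(dev)` rows (`label = i % 2 == 0`) and then, as before, appends the remaining rows in a seeded shuffle. A self-contained sketch of that logic, folding the notebook's separate `d_train`/`d_remaining` bookkeeping (built from a `dist` dictionary that is not visible in these hunks) into a single index mapping for brevity:

```python
import pandas as pd

def reorder_train(train: pd.DataFrame, dev: pd.DataFrame, seed: int = 42) -> pd.DataFrame:
    """Sketch of the 'new ordering' applied to the train split."""
    # Assumption: map each boolean `violated` label to positional row indices.
    # The notebook splits these further into d_train/d_remaining via `dist`.
    by_label = {
        True: [i for i, v in enumerate(train["violated"]) if v],
        False: [i for i, v in enumerate(train["violated"]) if not v],
    }

    # New ordering: the first len(dev) rows alternate True/False labels
    # instead of mirroring dev's own label sequence (the previous behaviour).
    new_rows = []
    for i in range(len(dev["violated"])):
        label = i % 2 == 0
        new_rows.append(train.iloc[by_label[label].pop()])

    # Whatever was not consumed above is shuffled with a fixed seed and appended.
    leftover = [i for idxs in by_label.values() for i in idxs]
    rest = train.iloc[leftover].sample(frac=1, random_state=seed)
    return pd.concat([pd.DataFrame(new_rows), rest])
```

Re-running the notebook with this change rewrites data/train.jsonl and data/dev.jsonl, which is what produces the new LFS object ids recorded above.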