Tasks: Token Classification
Sub-tasks: named-entity-recognition
Modalities: Text
Formats: parquet
Languages: English
Size: 1K - 10K
Commit 2c12296: modify reader
dipteshkanojia committed
1 Parent(s): 9eb5c6d

Files changed:
- README.md (+1 -1)
- check.ipynb (+10 -3)
README.md
CHANGED
@@ -24,7 +24,7 @@ task_ids:
 
 This is the repository for PLOD Dataset subset being used for CW in NLP module 2023-2024 at University of Surrey.
 
-### Dataset
+### Original Dataset (Only for exploration. For CW, You must USE THE PLOD-CW subset)
 
 We provide two variants of our dataset - Filtered and Unfiltered. They are described in our paper here.
 
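For quick exploration, the subset referenced in the README above can be loaded with the Hugging Face `datasets` library. This is a minimal sketch, assuming the dataset is hosted under the repository id `surrey-nlp/PLOD-CW` and exposes a `train` split with `tokens`, `pos_tags`, and `ner_tags` columns; none of these identifiers are confirmed by this commit.

```python
# Hedged loading sketch; "surrey-nlp/PLOD-CW", the "train" split, and the column
# names are assumptions based on the README and notebook, not part of this commit.
from datasets import load_dataset

dataset = load_dataset("surrey-nlp/PLOD-CW")  # assumed repository id
print(dataset)                                # inspect available splits
print(dataset["train"][0])                    # expected fields: tokens, pos_tags, ner_tags
```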
check.ipynb
CHANGED
@@ -2,9 +2,16 @@
  "cells": [
   {
    "cell_type": "code",
-   "execution_count":
+   "execution_count": 4,
    "metadata": {},
    "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "0\n"
+     ]
+    },
     {
      "ename": "IndexError",
      "evalue": "list index out of range",
@@ -12,7 +19,7 @@
      "traceback": [
       "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
       "\u001b[0;31mIndexError\u001b[0m Traceback (most recent call last)",
-      "Input \u001b[0;32mIn [
+      "Input \u001b[0;32mIn [4]\u001b[0m, in \u001b[0;36m<cell line: 1>\u001b[0;34m()\u001b[0m\n\u001b[1;32m 16\u001b[0m tokens\u001b[38;5;241m.\u001b[39mappend(splits[\u001b[38;5;241m0\u001b[39m])\n\u001b[1;32m 17\u001b[0m pos_tags\u001b[38;5;241m.\u001b[39mappend(splits[\u001b[38;5;241m1\u001b[39m])\n\u001b[0;32m---> 18\u001b[0m ner_tags\u001b[38;5;241m.\u001b[39mappend(\u001b[43msplits\u001b[49m\u001b[43m[\u001b[49m\u001b[38;5;241;43m3\u001b[39;49m\u001b[43m]\u001b[49m\u001b[38;5;241m.\u001b[39mrstrip())\n\u001b[1;32m 20\u001b[0m \u001b[38;5;66;03m# last example\u001b[39;00m\n\u001b[1;32m 21\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m tokens:\n",
       "\u001b[0;31mIndexError\u001b[0m: list index out of range"
      ]
     }
@@ -35,7 +42,7 @@
    " splits = line.split(\" \")\n",
    " tokens.append(splits[0])\n",
    " pos_tags.append(splits[1])\n",
-   " ner_tags.append(splits[3].rstrip())\n",
+   " ner_tags.append(splits[2].rstrip())\n",
    "\n",
    " # last example\n",
    " if tokens:\n",
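The change above fixes the notebook's reader to take the NER tag from column index 2 rather than 3, which is what triggered the IndexError in the recorded traceback. Below is a minimal, self-contained sketch of such a three-column reader (token, POS tag, NER tag per line, blank lines separating examples), mirroring the fixed cell; the function name `read_conll`, the file name, and the blank-line convention are illustrative assumptions rather than code taken from the repository.

```python
# Sketch of a three-column CoNLL-style reader (token, POS tag, NER tag per line),
# mirroring the fixed notebook cell: the NER tag now comes from splits[2], not splits[3].
# The function name and the path "train.conll" are hypothetical placeholders.

def read_conll(path):
    examples = []
    tokens, pos_tags, ner_tags = [], [], []
    with open(path, encoding="utf-8") as f:
        for line in f:
            if line.strip() == "":  # blank line ends the current example
                if tokens:
                    examples.append({"tokens": tokens, "pos_tags": pos_tags, "ner_tags": ner_tags})
                    tokens, pos_tags, ner_tags = [], [], []
                continue
            splits = line.split(" ")
            tokens.append(splits[0])
            pos_tags.append(splits[1])
            ner_tags.append(splits[2].rstrip())  # column 2 holds the NER tag
    if tokens:  # last example, in case the file has no trailing blank line
        examples.append({"tokens": tokens, "pos_tags": pos_tags, "ner_tags": ner_tags})
    return examples

# examples = read_conll("train.conll")  # hypothetical path
```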