dipteshkanojia committed
Commit 9eb5c6d
1 Parent(s): 54d7821

modify reader

Files changed (2):
  1. PLOD-CW.py +1 -2
  2. check.ipynb +74 -0
PLOD-CW.py CHANGED
@@ -2,7 +2,6 @@ import os
 
 import datasets
 from typing import List
-import json
 
 logger = datasets.logging.get_logger(__name__)
 
@@ -124,7 +123,7 @@ class PLODfilteredConfig(datasets.GeneratorBasedBuilder):
                     splits = line.split(" ")
                     tokens.append(splits[0])
                     pos_tags.append(splits[1])
-                    ner_tags.append(splits[3].rstrip())
+                    ner_tags.append(splits[2].rstrip())
             # last example
             if tokens:
                 yield guid, {
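
For reference, a minimal sketch of how the corrected parsing behaves, assuming each non-blank data line carries three space-separated columns (token, POS tag, NER/abbreviation tag); the example line and tag values below are illustrative, not taken from the dataset:

# Minimal sketch of the corrected CoNLL line parsing, assuming the
# three-column "token POS NER" layout; the example line is illustrative.
line = "protein NN B-O\n"

splits = line.split(" ")
token = splits[0]             # "protein"
pos_tag = splits[1]           # "NN"
ner_tag = splits[2].rstrip()  # "B-O" -- index 2; splits[3] does not exist,
                              # which is what raised the IndexError before this fix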
check.ipynb ADDED
@@ -0,0 +1,74 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {},
+   "outputs": [
+    {
+     "ename": "IndexError",
+     "evalue": "list index out of range",
+     "output_type": "error",
+     "traceback": [
+      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
+      "\u001b[0;31mIndexError\u001b[0m Traceback (most recent call last)",
+      "Input \u001b[0;32mIn [3]\u001b[0m, in \u001b[0;36m<cell line: 1>\u001b[0;34m()\u001b[0m\n\u001b[1;32m 15\u001b[0m tokens\u001b[38;5;241m.\u001b[39mappend(splits[\u001b[38;5;241m0\u001b[39m])\n\u001b[1;32m 16\u001b[0m pos_tags\u001b[38;5;241m.\u001b[39mappend(splits[\u001b[38;5;241m1\u001b[39m])\n\u001b[0;32m---> 17\u001b[0m ner_tags\u001b[38;5;241m.\u001b[39mappend(\u001b[43msplits\u001b[49m\u001b[43m[\u001b[49m\u001b[38;5;241;43m3\u001b[39;49m\u001b[43m]\u001b[49m\u001b[38;5;241m.\u001b[39mrstrip())\n\u001b[1;32m 18\u001b[0m \u001b[38;5;66;03m# last example\u001b[39;00m\n\u001b[1;32m 19\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m tokens:\n",
+      "\u001b[0;31mIndexError\u001b[0m: list index out of range"
+     ]
+    }
+   ],
+   "source": [
+    "with open(\"data/train.conll\", encoding=\"utf-8\") as f:\n",
+    "    guid = 0\n",
+    "    tokens = []\n",
+    "    pos_tags = []\n",
+    "    ner_tags = []\n",
+    "    for line in f:\n",
+    "        if line.startswith(\"-DOCSTART-\") or line == \"\" or line == \"\\n\":\n",
+    "            if tokens:\n",
+    "                guid += 1\n",
+    "                tokens = []\n",
+    "                pos_tags = []\n",
+    "                ner_tags = []\n",
+    "        else:\n",
+    "            print(guid)\n",
+    "            splits = line.split(\" \")\n",
+    "            tokens.append(splits[0])\n",
+    "            pos_tags.append(splits[1])\n",
+    "            ner_tags.append(splits[3].rstrip())\n",
+    "\n",
+    "    # last example\n",
+    "    if tokens:\n",
+    "        print(\"lst\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "hfdataset",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.9.12"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
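
As a follow-up to the notebook, a standalone sketch of the same sanity check with the corrected column index; it assumes data/train.conll (the path used in the notebook) is present locally and follows the three-column layout described above:

# Sketch of the notebook's check with the corrected column index.
# Assumes data/train.conll exists and each non-blank, non-DOCSTART line
# has three space-separated columns: token, POS tag, NER tag.
with open("data/train.conll", encoding="utf-8") as f:
    guid = 0
    tokens, pos_tags, ner_tags = [], [], []
    for line in f:
        if line.startswith("-DOCSTART-") or line == "" or line == "\n":
            if tokens:
                guid += 1
                tokens, pos_tags, ner_tags = [], [], []
        else:
            splits = line.split(" ")
            tokens.append(splits[0])
            pos_tags.append(splits[1])
            ner_tags.append(splits[2].rstrip())  # index 2 instead of 3

    # last example
    if tokens:
        guid += 1

print(f"parsed {guid} examples without an IndexError")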