keplersj committed on
Commit ac614a9
1 Parent(s): 8a53c72

feat: basic version of convert.py

Files changed (2)
  1. convert.py +147 -0
  2. requirements.txt +3 -0
convert.py ADDED
@@ -0,0 +1,147 @@
+ from qwikidata.entity import WikidataItem
+ from qwikidata.json_dump import WikidataJsonDump
+ import pyarrow as pa
+ import pyarrow.parquet as pq
+ import pandas as pd
+
+ # create an instance of WikidataJsonDump
+ wjd_dump_path = "wikidata-20240304-all.json.bz2"
+ wjd = WikidataJsonDump(wjd_dump_path)
+
+ # Create an empty list to store the dictionaries
+ # data = []
+
+ # # Iterate over the entities in wjd and add them to the list
+ # for ii, entity_dict in enumerate(wjd):
+ #     if ii > 1:
+ #         break
+
+ #     if entity_dict["type"] == "item":
+ #         data.append(entity_dict)
+
+ # TODO: Schema for Data Set
+ # Create a schema for the table
+ # {
+ #     "id": "Q60",
+ #     "type": "item",
+ #     "labels": {},
+ #     "descriptions": {},
+ #     "aliases": {},
+ #     "claims": {},
+ #     "sitelinks": {},
+ #     "lastrevid": 195301613,
+ #     "modified": "2020-02-10T12:42:02Z"
+ # }
+ # schema = pa.schema([
+ #     ("id", pa.string()),
+ #     ("type", pa.string()),
+ #     # {
+ #     #     "labels": {
+ #     #         "en": {
+ #     #             "language": "en",
+ #     #             "value": "New York City"
+ #     #         },
+ #     #         "ar": {
+ #     #             "language": "ar",
+ #     #             "value": "\u0645\u062f\u064a\u0646\u0629 \u0646\u064a\u0648 \u064a\u0648\u0631\u0643"
+ #     #         }
+ #     #     }
+ #     ("labels", pa.map_(pa.string(), pa.struct([
+ #         ("language", pa.string()),
+ #         ("value", pa.string())
+ #     ]))),
+ #     # "descriptions": {
+ #     #     "en": {
+ #     #         "language": "en",
+ #     #         "value": "largest city in New York and the United States of America"
+ #     #     },
+ #     #     "it": {
+ #     #         "language": "it",
+ #     #         "value": "citt\u00e0 degli Stati Uniti d'America"
+ #     #     }
+ #     # }
+ #     ("descriptions", pa.map_(pa.string(), pa.struct([
+ #         ("language", pa.string()),
+ #         ("value", pa.string())
+ #     ]))),
+ #     # "aliases": {
+ #     #     "en": [
+ #     #         {
+ #     #             "language": "en",
+ #     #             "value": "New York"
+ #     #         }
+ #     #     ],
+ #     #     "fr": [
+ #     #         {
+ #     #             "language": "fr",
+ #     #             "value": "New York City"
+ #     #         },
+ #     #         {
+ #     #             "language": "fr",
+ #     #             "value": "NYC"
+ #     #         },
+ #     #         {
+ #     #             "language": "fr",
+ #     #             "value": "The City"
+ #     #         },
+ #     #         {
+ #     #             "language": "fr",
+ #     #             "value": "La grosse pomme"
+ #     #         }
+ #     #     ]
+ #     # }
+ #     # }
+ #     ("aliases", pa.map_(pa.string(), pa.struct([
+ #         ("language", pa.string()),
+ #         ("value", pa.string())
+ #     ]))),
+ #     # {
+ #     #     "claims": {
+ #     #         "P17": [
+ #     #             {
+ #     #                 "id": "q60$5083E43C-228B-4E3E-B82A-4CB20A22A3FB",
+ #     #                 "mainsnak": {},
+ #     #                 "type": "statement",
+ #     #                 "rank": "normal",
+ #     #                 "qualifiers": {
+ #     #                     "P580": [],
+ #     #                     "P5436": []
+ #     #                 },
+ #     #                 "references": [
+ #     #                     {
+ #     #                         "hash": "d103e3541cc531fa54adcaffebde6bef28d87d32",
+ #     #                         "snaks": []
+ #     #                     }
+ #     #                 ]
+ #     #             }
+ #     #         ]
+ #     #     }
+ #     # }
+ #     ("claims", pa.map_(pa.string(), pa.array(pa.struct([
+ #         ("id", pa.string()),
+ #         ("mainsnak", pa.struct([])),
+ #         ("type", pa.string()),
+ #         ("rank", pa.string()),
+ #         ("qualifiers", pa.map_(pa.string(), pa.array(pa.struct([
+
+ #         ])))),
+ #         ("references", pa.array(pa.struct([
+ #             ("hash", pa.string()),
+ #             ("snaks", pa.array(pa.struct([])))
+ #         ])))
+ #     ])))),
+ #     ("sitelinks", pa.struct([
+ #         ("site", pa.string()),
+ #         ("title", pa.string())
+ #     ])),
+ #     ("lastrevid", pa.int64()),
+ #     ("modified", pa.string())
+ # ])
+
+ # Create a table from the list of dictionaries and the schema
+ # table = pa.Table.from_pandas(pd.DataFrame(data), schema=schema)
+ table = pa.Table.from_pandas(pd.DataFrame(wjd))
+
+ # Write the table to disk as parquet
+ parquet_path = "wikidata-20240304-all.parquet"
+ pq.write_table(table, parquet_path)
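
One reason the schema in the TODO block above is left commented out is that pa.array(...) builds an array from data and cannot serve as a field type inside pa.schema; repeated values need the pa.list_(...) type constructor instead. Below is a minimal sketch of how that schema might be completed and used. It is not what convert.py currently does: as simplifying assumptions, the claims and sitelinks columns are stored as raw JSON strings rather than typed structs, and the to_row helper, the entity_schema name, and the 10,000-entity batch size are hypothetical. Streaming batches through pq.ParquetWriter also avoids materialising the whole dump in one DataFrame.

import json

import pyarrow as pa
import pyarrow.parquet as pq
from qwikidata.json_dump import WikidataJsonDump

# Reusable struct for {"language": ..., "value": ...} objects.
lang_value = pa.struct([("language", pa.string()), ("value", pa.string())])

# Hypothetical completion of the TODO schema; claims/sitelinks kept as JSON text.
entity_schema = pa.schema([
    ("id", pa.string()),
    ("type", pa.string()),
    ("labels", pa.map_(pa.string(), lang_value)),
    ("descriptions", pa.map_(pa.string(), lang_value)),
    ("aliases", pa.map_(pa.string(), pa.list_(lang_value))),
    ("claims", pa.string()),
    ("sitelinks", pa.string()),
    ("lastrevid", pa.int64()),
    ("modified", pa.string()),
])

def to_row(entity_dict):
    # Flatten one raw dump entry (a plain dict) into the columns declared above.
    return {
        "id": entity_dict.get("id"),
        "type": entity_dict.get("type"),
        "labels": entity_dict.get("labels", {}),
        "descriptions": entity_dict.get("descriptions", {}),
        "aliases": entity_dict.get("aliases", {}),
        "claims": json.dumps(entity_dict.get("claims", {})),
        "sitelinks": json.dumps(entity_dict.get("sitelinks", {})),
        "lastrevid": entity_dict.get("lastrevid"),
        "modified": entity_dict.get("modified"),
    }

wjd = WikidataJsonDump("wikidata-20240304-all.json.bz2")
with pq.ParquetWriter("wikidata-20240304-all.parquet", entity_schema) as writer:
    batch = []
    for entity_dict in wjd:
        batch.append(to_row(entity_dict))
        if len(batch) >= 10_000:
            writer.write_table(pa.Table.from_pylist(batch, schema=entity_schema))
            batch = []
    if batch:
        writer.write_table(pa.Table.from_pylist(batch, schema=entity_schema))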
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ qwikidata
+ pyarrow
+ pandas
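
Once the conversion has run, the output can be sanity-checked without loading it fully into memory; the filename below is the parquet_path hard-coded in convert.py.

import pyarrow.parquet as pq

# Open the converted dump lazily and print its schema and row count.
pf = pq.ParquetFile("wikidata-20240304-all.parquet")
print(pf.schema_arrow)
print(pf.metadata.num_rows)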