from qwikidata.json_dump import WikidataJsonDump
import pyarrow as pa
import pyarrow.parquet as pq
import pandas as pd

# Open the dump; WikidataJsonDump streams entities one at a time,
# so the whole file never needs to fit in memory
wjd_dump_path = "wikidata-20240304-all.json.bz2"
wjd = WikidataJsonDump(wjd_dump_path)

# Create an empty list to store the dictionaries
data = []

# Iterate over the entities in wjd, keeping the first ~1,000 items as a sample
for ii, entity_dict in enumerate(wjd):
    if ii > 1000:
        break

    if entity_dict["type"] == "item":
        data.append(entity_dict)

# Create a PyArrow struct for the [Wikidata Snak Format](https://doc.wikimedia.org/Wikibase/master/php/docs_topics_json.html#json_snaks), e.g.:
# {
#   "snaktype": "value",
#   "property": "P17",
#   "datatype": "wikibase-item",
#   "datavalue": {
#     "value": {
#       "entity-type": "item",
#       "id": "Q30",
#       "numeric-id": 30
#     },
#     "type": "wikibase-entityid"
#   }
# }
snak = pa.struct([
    ("snaktype", pa.string()),
    ("property", pa.string()),
    ("datatype", pa.string()),
    ("datavalue", pa.struct([
        ("value", pa.struct([
            ("entity-type", pa.string()),
            ("id", pa.string()),
            ("numeric-id", pa.int64())
        ])),
        ("type", pa.string())
    ]))
])
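
# Note: the snak struct above only models "wikibase-entityid" datavalues; snaks
# whose datavalue is a string, time, quantity, etc. are shaped differently and
# would need a broader "value" type.

# In the dump, each claim is a *statement* wrapping its main snak (see the
# claim example in the schema comments below). A minimal statement struct,
# omitting qualifiers and references for simplicity:
statement = pa.struct([
    ("id", pa.string()),
    ("mainsnak", snak),
    ("type", pa.string()),
    ("rank", pa.string())
])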


# Schema for the dataset, based on the [Wikidata JSON Format Docs](https://doc.wikimedia.org/Wikibase/master/php/docs_topics_json.html)
# Top-level entity layout:
# {
#  "id": "Q60",
#  "type": "item",
#  "labels": {},
#  "descriptions": {},
#  "aliases": {},
#  "claims": {},
#  "sitelinks": {},
#  "lastrevid": 195301613,
#  "modified": "2020-02-10T12:42:02Z"
# }
schema = pa.schema([
    ("id", pa.string()),
    ("type", pa.string()),
    # {
    #   "labels": {
    #     "en": {
    #     "language": "en",
    #       "value": "New York City"
    #     },
    #     "ar": {
    #       "language": "ar",
    #       "value": "\u0645\u062f\u064a\u0646\u0629 \u0646\u064a\u0648 \u064a\u0648\u0631\u0643"
    #     }
    #   }
    ("labels", pa.map_(pa.string(), pa.struct([
        ("language", pa.string()),
        ("value", pa.string())
    ]))),
    #   "descriptions": {
    #     "en": {
    #       "language": "en",
    #       "value": "largest city in New York and the United States of America"
    #     },
    #     "it": {
    #       "language": "it",
    #       "value": "citt\u00e0 degli Stati Uniti d'America"
    #     }
    #   }
    ("descriptions", pa.map_(pa.string(), pa.struct([
        ("language", pa.string()),
        ("value", pa.string())
    ]))),
    #   "aliases": {
    #     "en": [
    #       {
    #         "language": "en",pa.string
    #         "value": "New York"
    #       }
    #     ],
    #     "fr": [
    #       {
    #         "language": "fr",
    #         "value": "New York City"
    #       },
    #       {
    #         "language": "fr",
    #         "value": "NYC"
    #       },
    #       {
    #         "language": "fr",
    #         "value": "The City"
    #       },
    #       {
    #         "language": "fr",
    #         "value": "La grosse pomme"
    #       }
    #     ]
    #   }
    # }
    ("aliases", pa.map_(pa.string(), pa.list_(pa.struct([
        ("language", pa.string()),
        ("value", pa.string())
    ])))),
    # {
    #   "claims": {
    #     "P17": [
    #       {
    #         "id": "q60$5083E43C-228B-4E3E-B82A-4CB20A22A3FB",
    #         "mainsnak": {},
    #         "type": "statement",
    #         "rank": "normal",
    #         "qualifiers": {
    #           "P580": [],
    #           "P5436": []
    #         },
    #         "references": [
    #           {
    #             "hash": "d103e3541cc531fa54adcaffebde6bef28d87d32",
    #             "snaks": []
    #           }
    #         ]
    #       }
    #     ]
    #   }
    # }
    ("claims", pa.map_(pa.string(), pa.list_(snak))),
    ("sitelinks", pa.struct([
        ("site", pa.string()),
        ("title", pa.string())
    ])),
    ("lastrevid", pa.int64()),
    ("modified", pa.string())
])

# Create a table from the list of dictionaries and the schema
table = pa.Table.from_pandas(pd.DataFrame(data), schema=schema)
# Converting the entire dump in one shot would exhaust memory:
# table = pa.Table.from_pandas(pd.DataFrame(wjd))
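
# Alternative sketch (assuming pyarrow >= 7.0): build the table straight from
# the list of dicts and skip the pandas round-trip:
# table = pa.Table.from_pylist(data, schema=schema)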

# Write the table to disk as parquet
parquet_path = "wikidata-20240304-all.parquet"
pq.write_table(table, parquet_path)
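
# Optional sanity check: read the file back and confirm the row count
check = pq.read_table(parquet_path)
print(f"Wrote {check.num_rows} rows to {parquet_path}")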