# wikiHow QA dataset generator: scrapes n random articles per language into
# data_<lang>.csv, then dedupes/repackages them into
# <lang>-wikihow-qa-dataset-<k>k.csv.
import csv
import io
import json
import time
from urllib.parse import unquote

from pywiki_custom import *
# wikiHow language editions to scrape.
langs = {"en", "ru", "pt", "it", "es", "fr", "de", "nl"}
# QA pairs to collect per language.
n = 2200
def cleandata(lang, total=None):
    """Deduplicate the scraped CSV for *lang* and repackage the URL column.

    Reads ``data_<lang>.csv``, drops rows whose URL was already seen,
    renames column 3 from 'URL' to 'METADATA' and rewrites each row's URL
    as a JSON object ``{"url": ..., "language": ...}``, writing the result
    to ``<lang>-wikihow-qa-dataset-<k>k.csv``.

    Args:
        lang: language code used in the file names and the metadata blob.
        total: row count used only for the ``<k>k`` filename suffix;
            defaults to the module-level ``n`` so existing one-argument
            call sites behave exactly as before.
    """
    count = n if total is None else total
    input_file = f'data_{lang}.csv'
    output_file = f'{lang}-wikihow-qa-dataset-{count / 1000}k.csv'
    seen_urls = set()  # was a dict used as a set
    with open(input_file, 'r') as f_input, open(output_file, 'w', newline='') as f_output:
        csv_input = csv.reader(f_input)
        csv_output = csv.writer(f_output)
        header = next(csv_input)
        header[3] = 'METADATA'
        csv_output.writerow(header)
        for row in csv_input:
            try:
                url = row[3]
                if url in seen_urls:
                    print(f"\033[91mDuplicate row found, url: {url}\033[0m")
                    continue
                # json.dumps escapes quotes/backslashes that the original
                # hand-built f-string would have emitted as broken JSON;
                # output is byte-identical for benign URLs.
                row[3] = json.dumps({"url": url, "language": lang})
                csv_output.writerow(row)
                seen_urls.add(url)
            # Was a bare ``except:`` — keep the best-effort skip of broken
            # rows, but stop swallowing SystemExit/KeyboardInterrupt.
            except Exception:
                print(f"\033[91mBroken found, url: {row}\033[0m")
def getrandom(language=None):
    """Fetch one random wikiHow article and return (question, answer, url).

    Args:
        language: wiki language code. When omitted, falls back to the
            module-level ``lang`` global set by the generation loop —
            this preserves the original zero-argument call sites, which
            silently depended on that global.

    Returns:
        Tuple of (title used as the question, full article text, article URL).
    """
    if language is None:
        language = lang  # original implicit global dependency, kept for compat
    how_to = RandomHowTo(language)
    wkhowto_url = how_to.url
    # Title arrives percent-encoded; unquote() accepts bytes and decodes
    # the result as UTF-8 (supported since Python 3.9).
    wkhowto_q = unquote(how_to.title.encode('utf-8'))
    wkhowto_a = how_to.print(extended=True)
    return wkhowto_q, wkhowto_a, wkhowto_url
# Scrape ``n`` random wikiHow articles per language into data_<lang>.csv.
# NOTE(review): getrandom() reads the loop variable ``lang`` as a module
# global, so that name must stay ``lang`` here.
for lang in langs:
    print(f"\33[34mGenerating {lang}...\033[0m")
    with open(f'data_{lang}.csv', mode='w', newline='') as file:
        writer = csv.writer(file)
        writer.writerow(['INSTRUCTION', 'RESPONSE', 'SOURCE', 'URL'])
        for done in range(1, n + 1):
            question, answer, url = getrandom()
            writer.writerow([question, answer, f'{lang}.wikihow.com', url])
            print(f"{done} out of {n}\033[0m")
            time.sleep(3)  # throttle requests to the site
    print(f"\33[92mDone for {lang}!\033[0m\n")
# Post-process every language's raw CSV (dedupe + metadata column).
for lang in langs:
    cleandata(lang)
# Fixed: original line carried a trailing ``|`` extraction artifact that
# made it a syntax error.
print("\33[32mDone for all!\033[0m")