import glob

import pandas as pd

# Convert every JSONL file of parallel English/Norwegian sentences in the
# current directory into a shuffled, two-column TSV file (source, target).
filelist = glob.glob('*.jsonl')

for jsonfile in filelist:
    print(f"Processing {jsonfile}")
    data = pd.DataFrame(columns=['source', 'target'])
    temp = pd.read_json(jsonfile, lines=True, encoding='utf8')
    errors = 0

    # Add both translation directions, prefixing each source sentence
    # with a tag naming the target language.
    for index, row in temp.iterrows():
        try:
            data.loc[len(data)] = ['nob: ' + str(row['en']), str(row['no'])]
            data.loc[len(data)] = ['eng: ' + str(row['no']), str(row['en'])]
        except Exception:
            errors += 1
            print("Unable to convert this line")
            print(row)

    # Tabs inside the text would break the TSV layout, so replace them with spaces.
    data['source'] = data['source'].str.replace('\t', ' ')
    data['target'] = data['target'].str.replace('\t', ' ')

    # Shuffle the sentence pairs, then write the dataset to disk.
    data = data.sample(frac=1).reset_index(drop=True)
    filename = jsonfile.replace(".jsonl", ".tsv")
    data.to_csv(filename, index=False, header=False, sep='\t')

    if errors:
        print(f"Skipped {errors} lines in {jsonfile}")

print("Finished")
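
# Optional follow-up: a minimal sketch of splitting each generated TSV into
# training and validation files with scikit-learn's train_test_split. This is
# not part of the conversion above; the 90/10 ratio, the random_state, and the
# "_train"/"_valid" file name suffixes are assumptions for illustration.
from sklearn.model_selection import train_test_split

for tsvfile in glob.glob('*.tsv'):
    # Skip files produced by a previous run of this split step.
    if tsvfile.endswith(('_train.tsv', '_valid.tsv')):
        continue
    pairs = pd.read_csv(tsvfile, sep='\t', names=['source', 'target'])
    train, valid = train_test_split(pairs, test_size=0.1, random_state=42)
    train.to_csv(tsvfile.replace('.tsv', '_train.tsv'),
                 index=False, header=False, sep='\t')
    valid.to_csv(tsvfile.replace('.tsv', '_valid.tsv'),
                 index=False, header=False, sep='\t')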