import glob
import os

import pandas as pd
from sklearn.model_selection import train_test_split

# One TSV file per language, each containing example sentences.
filelist = glob.glob('sentences/*.txt')

# Make sure the output directory exists before writing the splits.
os.makedirs('langid_datafiles', exist_ok=True)

for tsvfile in filelist:
    print(f"Processing {tsvfile}")
    data = pd.read_csv(tsvfile, sep='\t', on_bad_lines='skip',
                       engine='python', encoding='utf8')

    # The three-letter language code is taken from the start of the file name.
    lang = os.path.basename(tsvfile)[:3]

    # Files may contain either a single sentence column or an id + sentence pair.
    if len(data.columns) == 1:
        data.insert(0, 'id', '')
    data.columns = ['id', 'source']

    # The target is the language code; each source sentence gets a "lang: " task prefix.
    data['target'] = lang
    data['source'] = 'lang: ' + data['source']

    # Strip stray tabs so each sentence stays in a single TSV column.
    data['source'] = data['source'].str.replace('\t', ' ')

    # Shuffle the rows and keep only the columns needed for training.
    data = data.sample(frac=1).reset_index(drop=True)
    data = data[['source', 'target']]

    # Train - test - dev split (80% / 10% / 10%).
    train, test = train_test_split(data, test_size=0.2)
    test, dev = train_test_split(test, test_size=0.5)

    # Write the datasets to disk.
    train.to_csv('langid_datafiles/' + lang + '_train.tsv', index=False, header=False, sep='\t')
    test.to_csv('langid_datafiles/' + lang + '_test.tsv', index=False, header=False, sep='\t')
    dev.to_csv('langid_datafiles/' + lang + '_dev.tsv', index=False, header=False, sep='\t')

print("Finished")
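
# Optional sanity check -- a minimal sketch, not part of the original script.
# It assumes the 'langid_datafiles/<lang>_<split>.tsv' layout written above and
# only inspects the last language processed in the loop ('lang' still holds it),
# reloading each split to confirm the rough 80/10/10 proportions.
for split in ('train', 'test', 'dev'):
    rows = pd.read_csv(f'langid_datafiles/{lang}_{split}.tsv',
                       sep='\t', header=None, names=['source', 'target'])
    print(f"{lang} {split}: {len(rows)} sentences")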