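"""Convert TMX translation memories into plain tab-separated bitext.

Reads every .tmx file in tmx/, extracts the source/target segment pairs,
drops malformed, empty, or tab-containing segments, and writes the result
to tsv_source_target/<source><target>.tsv with one source<TAB>target pair
per line (no header). The output directory is assumed to exist.
"""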
import glob

import pandas as pd
import xmltodict
from sklearn.model_selection import train_test_split  # only needed if the commented-out train/test/dev split below is re-enabled

filelist = glob.glob('tmx/*.tmx')

for tmxfile in filelist:
    print(f"Starting to parse {tmxfile}")

    verbose = 0  # set to 1 to log every segment pair that gets dropped
    with open(tmxfile) as fd:
        doc = xmltodict.parse(fd.read())
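    # A parsed translation unit ("tu") is expected to look roughly like this
    # (structure assumed from the attribute accesses below):
    #   {'tuv': [{'@xml:lang': 'fi', 'seg': 'Hei'},
    #            {'@xml:lang': 'se', 'seg': 'Bures'}]}
    # where 'seg' may instead be a dict with a '#text' key when the segment
    # contains inline markup.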
    
    try:
        # Use the first translation unit to determine the source and target languages
        source = doc['tmx']['body']['tu'][0]['tuv'][0]['@xml:lang']
        target = doc['tmx']['body']['tu'][0]['tuv'][1]['@xml:lang']
    except KeyError:
        # Some TMX files use a plain "lang" attribute instead of "xml:lang"
        source = doc['tmx']['body']['tu'][0]['tuv'][0]['@lang']
        target = doc['tmx']['body']['tu'][0]['tuv'][1]['@lang']

    # Extract the segment pairs from the parsed TMX
    data = []
    errorcount = 0
    for item in doc['tmx']['body']['tu']:
        trans = {}
        valid = 1
        trans[source] = item['tuv'][0]['seg']
        trans[target] = item['tuv'][1]['seg']
        # Segments with inline markup are parsed into dicts; keep only their text content
        if isinstance(trans[source], dict) or isinstance(trans[target], dict):
            try:
                if isinstance(trans[source], dict):
                    trans[source] = trans[source]['#text']
                if isinstance(trans[target], dict):
                    trans[target] = trans[target]['#text']
            except KeyError:
                if verbose:
                    print("Dropping - Malformed XML")
                valid = 0
        
        if not trans[source] or not trans[target]:
            valid = 0
            if verbose:
                print("Dropping - Source or target segment is missing")

        elif len(trans[source]) <= 1 or len(trans[target]) <= 1:
            valid = 0
            if verbose:
                print("Dropping - Segment is empty or a single character")

        elif '\t' in trans[source] or '\t' in trans[target]:
            valid = 0
            if verbose:
                print("Dropping - Segment contains a tab character")


        if valid == 1:
            data.append(trans)
        else:
            errorcount += 1

    # Create dataframe
    df = pd.DataFrame(data)
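    # The DataFrame has two columns named after the language codes
    # (e.g. 'fi' and 'se'), one row per surviving segment pair.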

    # Shuffle
    # df = df.sample(frac=1).reset_index(drop=True)

    # Train - test - dev
    #train, test = train_test_split(df, test_size=0.2)
    #test, dev = train_test_split(test, test_size=0.5)

    # Write the datasets to disk
    #train.to_csv('train_tmp.tsv', index=False, header=False, sep='\t')
    #test.to_csv('test_tmp.tsv', index=False, header=False, sep='\t')
    #dev.to_csv('dev_tmp.tsv', index=False, header=False, sep='\t')

    # Normalise some ISO 639-1 language codes to ISO 639-3
    iso639_3 = {"fi": "fin", "nb": "nob", "se": "sme"}
    source = iso639_3.get(source, source)
    target = iso639_3.get(target, target)

    filename = "tsv_source_target/"+source+target+".tsv"
    df.to_csv(filename, index=False, header=False, sep='\t')

    #print(f"Finished writing train.tsv ({len(train)}), test.tsv ({len(test)}) and dev.tsv ({len(dev)}) to disk.")
    print(f"Finished writing {filename} ({len(df)}) to disk.")
    print(f"Totally {errorcount} errors")