import sys, util.gzopen, random

# Module-level counters reported in the summary printed at the end of the run.
singleton_count = 0  # single-word sentences dropped (filter is currently disabled via "True or")
mwe_count = 0        # lines expanded as multi-word expressions by process_mwe
train_count = 0      # sentences written to the training partition
test_count = 0       # sentences written to the testing partition

def get_sentences(file):
    """Read blank-line-separated sentences from an open file.

    Each sentence is returned as the list of its original lines followed by a
    terminating '\\n' entry.  A final sentence with no trailing blank line is
    kept only if it has more than one line.

    NOTE(review): the single-word-sentence filter was disabled in the original
    code ("if True or len(sentence) > 1"), so every sentence -- including
    empty ones produced by consecutive blank lines -- is kept, and
    singleton_count was never incremented.  The unreachable branch has been
    removed; restore "if len(sentence) > 1" here to re-enable the filter.
    """
    sentences = []
    sentence = []
    for line in file:
        if line.strip() == '':
            # End of sentence: keep it unconditionally (filter disabled, see NOTE).
            sentence.append('\n')
            sentences.append(sentence)
            sentence = []
            continue
        sentence.append(line)
    # Flush a final sentence that was not followed by a blank line.
    if len(sentence) > 1:
        sentence.append('\n')
        sentences.append(sentence)
    return sentences

def next_sentence(file):
    """Generator version of get_sentences: yield one sentence at a time.

    Each sentence is yielded as the list of its original lines followed by a
    terminating '\\n' entry.  A final sentence with no trailing blank line is
    yielded only if it has more than one line.

    NOTE(review): as in get_sentences, the single-word-sentence filter was
    disabled in the original ("if True or len(sentence) > 1"), so every
    sentence delimited by a blank line is yielded and singleton_count was
    never incremented.  The unreachable branch has been removed; restore
    "if len(sentence) > 1" here to re-enable the filter.
    """
    sentence = []
    for line in file:
        if line.strip() == '':
            # End of sentence: yield it unconditionally (filter disabled, see NOTE).
            sentence.append('\n')
            yield sentence
            sentence = []
            continue
        sentence.append(line)
    # Flush a final sentence that was not followed by a blank line.
    if len(sentence) > 1:
        sentence.append('\n')
        yield sentence

def process_mwe(line):
    """Expand a multi-word-expression line into one word per output line.

    A line with more than two whitespace-separated tokens is treated as
    "w1 w2 ... wN tag": the first word keeps the tag, and each following
    word is emitted on its own line with the placeholder tag 'ditto'.
    Lines with two or fewer tokens are returned unchanged.  Increments the
    module-level mwe_count once per expanded line.
    """
    global mwe_count
    tokens = line.split()
    if len(tokens) <= 2:
        return line
    mwe_count += 1
    tag = tokens[-1]
    parts = ['%s\t%s\n' % (tokens[0], tag)]
    for word in tokens[1:-1]:
        parts.append('%s\tditto\n' % word)
    return ''.join(parts)
        
train       = util.gzopen.gzopen(sys.argv[3]+'.words.gz','w')
train_lex   = util.gzopen.gzopen(sys.argv[3]+'.lex.gz','w')
test        = util.gzopen.gzopen(sys.argv[4]+'.words.gz','w')
test_lex    = util.gzopen.gzopen(sys.argv[4]+'.lex.gz','w')

#for s in zip(next_sentence(data),next_sentence(lex)):
data = open('%s.words'%sys.argv[1])
word_sentences = get_sentences(data)
data.close()

lex = open('%s.lex'%sys.argv[1])
lex_sentences  = get_sentences(lex)
lex.close()

cut_index = float(sys.argv[2])

sentences=[s for s in zip(word_sentences,lex_sentences)]
random.shuffle(sentences)

for i,(sentence,lex_sentence) in enumerate(sentences):
    if cut_index > i:
        out = train
        out_lex = train_lex
        train_count += 1
    else:
        out = test
        out_lex = test_lex
        test_count += 1
    for line in sentence:
        print >>out, process_mwe(line),
    for line in lex_sentence:
        print >>out_lex, process_mwe(line),

print "Training sentences:", train_count 
print "Testing sentences:", test_count 
print "Singletons Removed:", singleton_count/2
print "MWEs split:", mwe_count/2
