# Simple usage
import codecs
import sys
import re
import nltk

# Named-entity placeholders: model output uses tokens like '<NE>1' which we
# map back to a fixed roster of character names before pairing sentences.
name_list = ['Clancy', 'Frank', 'Sally', 'Marcus', 'Jess', 'Thomas', 'Rosie', 'Alan', 'Alex']
name_list = [('<NE>' + str(i + 1), name) for i, name in enumerate(name_list)]
# Build the lookup directly from the (placeholder, name) pairs instead of a
# manual assignment loop.
name_dict = dict(name_list)

# Hard cap on the number of whitespace-separated tokens kept per story.
MAX_SEN_LEN = 250


def contain_sep(item):
    """Return True when *item* contains the ' <SEP> ' prompt/story delimiter."""
    return item.find(' <SEP> ') >= 0


def clear_text(text):
    """Normalise one raw story line.

    Strips surrounding whitespace, optionally drops the prompt before the
    ' <SEP> ' delimiter (when the module-level ``is_filter`` flag is 'true'),
    substitutes '<NE>k' placeholders with real names via ``name_dict``,
    truncates to ``MAX_SEN_LEN`` tokens, and trims a short dangling fragment
    after the last full stop.
    """
    text = text.strip()
    # ``is_filter`` is a module-level flag set from the command line; we only
    # read it here, so no ``global`` declaration is required.
    if is_filter == 'true':
        # Keep only the story part after the prompt separator.
        text = text.split(' <SEP> ')[1]
    for placeholder, real_name in name_dict.items():
        text = text.replace(placeholder, real_name)
    tokens = text.split(' ')
    text = ' '.join(tokens[:MAX_SEN_LEN])
    if not text.endswith('.'):
        cut = text.rfind('.')
        # Drop a trailing fragment shorter than 30 chars after the last '.'.
        if cut != -1 and len(text) - cut < 30:
            text = text[:cut + 1]
    return text


def split_sentence(paragraph):
    """Split *paragraph* into sentences with NLTK's punkt English tokenizer.

    The tokenizer pickle is loaded from disk once and cached on the function
    object; the original re-loaded it on every call even though this function
    runs once per story inside split_into_pair.
    """
    tokenizer = getattr(split_sentence, '_tokenizer', None)
    if tokenizer is None:
        tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
        split_sentence._tokenizer = tokenizer
    return tokenizer.tokenize(paragraph)


def split_into_pair(texts):
    """Turn each story into pairs of consecutive sentences.

    Sentences of five or fewer tokens are buffered and glued onto a
    neighbouring sentence so every emitted sentence has some substance.
    Returns a flat list of (story_id, pair_index, first, second) string
    tuples; story_id counts from 1 and also advances for skipped stories.
    """
    story_id = 0
    sentence_pair = []
    for text in texts:
        story_id += 1
        sentences = split_sentence(text)
        if len(sentences) < 2:
            continue
        merged_sens = []
        last_short_sen = ''
        for sen in sentences:
            # A sentence with more than 5 tokens stands on its own.
            if len(sen.split(' ')) > 5:
                if last_short_sen != '':
                    # Prepend any buffered short fragment(s).
                    sen = ' '.join([last_short_sen, sen])
                    last_short_sen = ''
                merged_sens.append(sen)
            else:
                if last_short_sen == '':
                    last_short_sen = sen
                else:
                    last_short_sen = ' '.join([last_short_sen, sen])
                    # Flush the buffer once the merged fragments are long enough.
                    if len(last_short_sen.split(' ')) > 5:
                        merged_sens.append(last_short_sen)
                        last_short_sen = ''
        if last_short_sen != '':
            if merged_sens:
                # Attach the trailing fragment to the last full sentence.
                merged_sens[-1] = ' '.join([merged_sens[-1], last_short_sen])
            else:
                # Bug fix: when every sentence was short nothing was flushed
                # above; the original indexed merged_sens[-1] here and raised
                # IndexError. Keep the merged fragment as the only sentence.
                merged_sens.append(last_short_sen)
        # Only stories with at least two merged sentences yield pairs.
        if len(merged_sens) > 1:
            sen_p = zip(merged_sens[:-1], merged_sens[1:])
            sen_p = [(str(story_id), str(i), p[0], p[1]) for i, p in enumerate(sen_p)]
            sentence_pair.extend(sen_p)
    return sentence_pair


# --- script entry point -------------------------------------------------
# argv[1]: input file, one generated story per line.
# argv[2]: 'true' to keep only lines containing the ' <SEP> ' prompt
#          separator (and to strip the prompt inside clear_text).
input_file = sys.argv[1]
is_filter = sys.argv[2]
output_file = input_file + '.sentence_pair'

# Read everything up front; the context manager closes the handle
# deterministically instead of leaking it like the original bare
# codecs.open(...).readlines().
with codecs.open(input_file, 'r', encoding='utf8') as fin:
    texts = fin.readlines()

print('total number of test example', len(texts))
if is_filter == 'true':
    texts = list(filter(contain_sep, texts))
texts = list(map(clear_text, texts))
print('correct number of test example', len(texts))
print('hyp')
if texts:  # avoid IndexError when the input is empty or fully filtered out
    print(texts[0])

sentence_pair = split_into_pair(texts)
sentence_pair = list(map(lambda x: '\t'.join(x), sentence_pair))
with codecs.open(output_file, 'w', encoding='utf8') as fout:
    fout.write('\n'.join(sentence_pair))
    fout.write('\n')
