#!/usr/bin/env python
# -*- coding: utf-8 -*-

import nltk.data


__author__ = 'gogo'

__version__ = '0.0.1'


def make_short_desc(desc, max_len=255, tokenize=None):
    """Build a short description by accumulating whole sentences from *desc*.

    Sentences are concatenated (with no extra separator) until appending the
    next one would push the total length past *max_len*.  If even the very
    first sentence exceeds *max_len*, that sentence is returned as-is so the
    result is never empty for non-empty input.

    Args:
        desc: Full description text to shorten.
        max_len: Soft maximum length of the result; the first-sentence
            fallback may exceed it.
        tokenize: Optional callable splitting text into a list of sentences.
            Defaults to the module-level punkt tokenizer's ``tokenize``
            method, preserving the original behavior.

    Returns:
        The shortened, whitespace-stripped description.
    """
    if tokenize is None:
        # Late binding: the punkt `tokenizer` is loaded at module level,
        # after this definition, and is resolved only at call time.
        tokenize = tokenizer.tokenize

    short_desc = ''
    for sentence in tokenize(desc):
        if len(short_desc) + len(sentence) > max_len:
            if not short_desc:
                # The first sentence alone is already too long; keep it
                # rather than returning an empty string.  (The original
                # used sentences[0], which equals this sentence on the
                # only iteration where this branch can fire.)
                short_desc = sentence
            # Once over the limit, nothing more can fit — stop.  (The
            # original reached the same outcome via the next iteration's
            # length test.)
            break
        short_desc += sentence

    return short_desc.strip()



# Sentence tokenizer used by make_short_desc(); loaded once at import time.
tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')


def main():
    """Read "name -> description" lines and write one shortened description
    per entry to short_desc/<name>.txt.

    Lines beginning with '-' are treated as separators and skipped; lines
    without a '->' delimiter are ignored.
    """
    with open('test_result/all_words_desc_test.txt') as f:
        for line in f:
            # startswith() is safe on an empty string, unlike line[0],
            # which would raise IndexError.
            if not line.startswith('-'):
                part_line = line.split('->', 1)
                if len(part_line) == 2:
                    out_path = "short_desc/" + part_line[0].strip() + ".txt"
                    with open(out_path, "w") as text_file:
                        text_file.write(make_short_desc(part_line[1]))


if __name__ == '__main__':
    main()