#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
@author coldwind
"""
import json
import re
from mosestokenizer import *
import jieba

# Output paths for the parallel keyword corpus.
# NOTE(review): only referenced by commented-out code in main() — verify
# whether keyword extraction is still wanted before removing.
keyword_zh_file_name = 'keyword/keyword_zh.csv'
keyword_en_file_name = 'keyword/keyword_en.csv'

# Output paths for the parallel title corpus (English source / Chinese target).
title_zh_file_name = 'title_remark/title_zh.csv'
title_en_file_name = 'title_remark/title_en.csv'

# Output paths for the parallel remark corpus (English source / Chinese target).
remark_zh_file_name = 'title_remark/remark_zh.csv'
remark_en_file_name = 'title_remark/remark_en.csv'


def write(output_file, content=''):
    """Write *content* to *output_file* followed by a single newline."""
    line = '{}\n'.format(content)
    output_file.write(line)


def tokenizer_en(en_text='Hello World!'):
    """Tokenize English text with Moses and return the tokens joined by spaces.

    Relies on the module-level ``en_tokenize`` (a MosesTokenizer instance)
    being initialized before this is called.
    # NOTE(review): ``en_tokenize`` is only created inside the __main__ guard,
    # so importing this module and calling tokenizer_en() raises NameError —
    # consider moving initialization to module scope or injecting it.
    """
    tokens = en_tokenize(en_text)
    # Join with a literal space instead of the __main__-only `whitespace`
    # global, so the function does not depend on script-mode initialization.
    return ' '.join(tokens)


def tokenizer_zh(zh_text='我来到北京清华大学'):
    """Segment Chinese text with jieba and return the tokens joined by spaces.

    Uses precise mode (``cut_all=False``) with the HMM model enabled for
    out-of-vocabulary word discovery.
    """
    tokens = jieba.cut(zh_text, cut_all=False, HMM=True)
    # Join with a literal space instead of the __main__-only `whitespace`
    # global, so the function works even when the module is imported.
    return ' '.join(tokens)


def _clean(text):
    """Flatten embedded newlines to spaces so each sample stays on one line."""
    return text.replace('\n', ' ')


def main(path='/mnt/cephfs/mahao/text_translation/translation.csv',
         min_length=6):
    """Build parallel title/remark corpora from a JSON-lines translation dump.

    Each input line is a JSON object with ``remark_origin`` /
    ``remark_translation`` and ``title_origin`` / ``title_translation`` keys.
    A pair is kept only when both sides are non-empty and longer than
    *min_length* characters; kept pairs are tokenized (Moses for English,
    jieba for Chinese) and appended to the corresponding output files.

    :param path: JSON-lines input file, one record per line.
    :param min_length: minimum character length for both sides of a pair
        (previously the hard-coded ``time_sequence_threshold``).
    """
    # Explicit UTF-8: the corpus contains Chinese text, and the platform
    # default encoding would silently corrupt it on some systems.
    with open(path, 'r', encoding='utf-8') as input_file, \
            open(title_en_file_name, 'w', encoding='utf-8') as title_en_file, \
            open(title_zh_file_name, 'w', encoding='utf-8') as title_zh_file, \
            open(remark_en_file_name, 'w', encoding='utf-8') as remark_en_file, \
            open(remark_zh_file_name, 'w', encoding='utf-8') as remark_zh_file:
        count = 0
        # One counter per corpus: EN and ZH lines are always written together,
        # so separate per-language counters (and the asserts comparing them)
        # were redundant — and `assert` is stripped under `python -O` anyway.
        remark_count = 0
        title_count = 0

        for line in input_file:
            count += 1

            # Progress heartbeat every 1000 records.
            if count % 1000 == 0:
                print('当前处理数量: %d' % count)

            text = json.loads(line.strip())

            remark_origin = _clean(text['remark_origin'])
            remark_translation = _clean(text['remark_translation'])

            if remark_origin and remark_translation \
                    and len(remark_origin) > min_length \
                    and len(remark_translation) > min_length:
                write(remark_en_file, tokenizer_en(remark_origin))
                write(remark_zh_file, tokenizer_zh(remark_translation))
                remark_count += 1

            title_origin = _clean(text['title_origin'])
            title_translation = _clean(text['title_translation'])

            if title_origin and title_translation \
                    and len(title_origin) > min_length \
                    and len(title_translation) > min_length:
                write(title_en_file, tokenizer_en(title_origin))
                write(title_zh_file, tokenizer_zh(title_translation))
                title_count += 1


if __name__ == '__main__':
    # MosesTokenizer spawns an external process; use try/finally so it is
    # shut down even when main() raises (previously an exception in main()
    # would leak the subprocess).
    en_tokenize = MosesTokenizer('en')
    whitespace = ' '
    try:
        main()
    finally:
        en_tokenize.close()
