#!/usr/bin/env python
# -*- coding: utf-8 -*-

import re
# Matches one character in the common CJK Unified Ideographs range.
# NOTE(review): name has a typo ("PAATTERN") but is kept for compatibility
# with existing references elsewhere in this file.
HAN_PAATTERN = r"[\u4E00-\u9FD5]"
# Matches an http/https/ftp/file URL (scheme plus a run of URL characters).
URL_PATTERN = r"(https?|ftp|file)://[-A-Za-z0-9+&@#/%?=~_|!:,.;]+[-A-Za-z0-9+&@#/%=~_|]"

import jieba
import json
# Special vocabulary tokens; TextClean.__init__ assigns them the fixed
# ids 0..3 (pad, start, end, unk) so downstream models can rely on them.
START_POS = "<s>"    # sequence start marker
END_POS = "</s>"     # sequence end marker
PAD_POS = "<pad>"    # padding token
UNK_POS = "<unk>"    # out-of-vocabulary token

class TextClean():
    """Filter, clean and tokenise a text corpus, building a word vocabulary.

    Each input line is dropped when it is too short, contains no Han
    character, or contains a URL; surviving lines have parenthesised
    spans removed, are segmented with jieba, and are written to the
    output file as space-separated tokens (one line per input line).
    The accumulated word<->id mappings are dumped to ``word_to_id.json``
    and ``id_to_word.json`` when :meth:`start` finishes.
    """

    def __init__(self, input_file, out_file):
        """Open *input_file* for reading and *out_file* for writing.

        Files are opened as UTF-8 explicitly so behaviour does not
        depend on the platform's default locale encoding.  Both are
        closed by :meth:`start`.
        """
        self.input_file = open(input_file, encoding="utf-8")
        self.out_file = open(out_file, "w", encoding="utf-8")

        # Special tokens get the first four ids so downstream models can
        # rely on fixed positions for pad/start/end/unk.
        self.word_to_id = {
            PAD_POS: 0,
            START_POS: 1,
            END_POS: 2,
            UNK_POS: 3,
        }
        self.id_to_word = {v: w for w, v in self.word_to_id.items()}

    def start(self):
        """Process the whole input file, then dump the vocabulary JSONs."""
        try:
            for i, line in enumerate(self.input_file):
                if i % 1000 == 0:
                    print(i)  # lightweight progress indicator
                if not self.text_filter(line):
                    continue
                line = self.clean_text(line)
                # Strip the trailing newline/whitespace so jieba does not
                # emit it as a token; write an explicit newline instead.
                words = list(jieba.cut(line.strip()))
                for w in words:
                    if w not in self.word_to_id:
                        word_id = len(self.word_to_id)
                        self.word_to_id[w] = word_id
                        self.id_to_word[word_id] = w
                self.out_file.write(" ".join(words) + "\n")
        finally:
            # Close both corpus files even if cleaning fails midway.
            self.input_file.close()
            self.out_file.close()

        # NOTE: json serialisation turns id_to_word's int keys into strings.
        with open("word_to_id.json", "w", encoding="utf-8") as f:
            json.dump(self.word_to_id, f, ensure_ascii=False)
        with open("id_to_word.json", "w", encoding="utf-8") as f:
            json.dump(self.id_to_word, f, ensure_ascii=False)
        print("word count", len(self.word_to_id))

    def text_filter(self, content):
        """Return True when *content* is worth keeping.

        Rejects very short lines (note: any trailing newline counts
        towards the length), lines without any Han character, and lines
        containing a URL.
        """
        if len(content) < 5:
            return False
        if not re.search(HAN_PAATTERN, content):
            return False
        if re.search(URL_PATTERN, content):
            return False
        return True

    def clean_text(self, content):
        """Remove every parenthesised span, e.g. ``"a(b)c"`` -> ``"ac"``."""
        return re.sub(r"\([^)]*\)", "", content)


if __name__ == '__main__':
    # Clean the raw comment dump and emit a whitespace-tokenised copy.
    source_path = "comment_65.txt"
    target_path = "comment_65_clean.txt"
    TextClean(source_path, target_path).start()