#!/usr/bin/python
# -*- coding:utf-8 -*-
import glob
import re
import shutil
import string
# import sys

import jieba
import jieba.posseg
import os

import settings
from api import utils

# Encodings used throughout the pipeline.  The commented-out Python 2
# reload(sys)/sys.setdefaultencoding hack that used to live here was dead
# code on Python 3 and has been removed.
encode = 'utf-8'  # NOTE(review): appears unused in this file -- kept for external importers

encode_code = 'utf-8'  # encoding applied when writing bytes to output files
decode_code = 'utf-8'  # encoding assumed when decoding input files


# Recreate the output tree from scratch on every run so stale results from a
# previous run cannot leak into this one.
if os.path.exists(settings.BASE_PATH):
    shutil.rmtree(settings.BASE_PATH)
os.mkdir(settings.BASE_PATH)
# os.path.join instead of manual '%s%s…' % (…, os.sep) concatenation: it is
# the idiomatic form and also tolerates a BASE_PATH with a trailing separator.
os.mkdir(os.path.join(settings.BASE_PATH, 'words'))
os.mkdir(os.path.join(settings.BASE_PATH, 'wordcount'))


class TextProcesser(object):
    """Pipeline for Chinese text processing.

    Splits a book into chapter files, segments each document into words
    (with part-of-speech flags) using jieba, and produces per-document
    word-frequency counts.

    Output layout under ``settings.BASE_PATH``:
        chapter-N            -- raw chapter text (divide_into_chapter)
        words/word-<id>.txt  -- space-separated words, one input line per line
        words/flag-<id>.txt  -- matching POS flags, one input line per line
        wordcount/<id>.txt   -- "word<TAB>count" lines, most frequent first
    """

    # Compiled once at class-creation time (was rebuilt on every call) and as
    # a raw string ('\s' in a non-raw literal is an invalid escape sequence).
    # Keeps CJK ideographs (U+4E00..U+9FA5), ASCII alphanumerics, whitespace.
    _KEEP_RE = re.compile(r"[^\u4e00-\u9fa5a-zA-Z0-9\s]")

    def __init__(self):
        pass

    # Split the book into chapter files.
    @staticmethod
    def divide_into_chapter():
        """Split ``settings.BOOK_PATH`` into per-chapter files.

        A line containing ``'[('`` marks the start of a new chapter; the text
        accumulated so far (including any preamble before the first marker)
        is flushed to ``BASE_PATH/chapter-N``.
        """
        chapter_cnt = 1
        chapter_text = b""

        with open(settings.BOOK_PATH, 'rb') as file_in:
            for line in file_in:
                if '[(' in line.decode(encode_code):
                    path_str = settings.BASE_PATH + '/chapter-' + str(chapter_cnt)
                    with open(path_str, 'wb') as file_out:
                        file_out.write(chapter_text)
                    chapter_cnt += 1
                    chapter_text = line
                else:
                    chapter_text += line

        # BUGFIX: the original never flushed the text accumulated after the
        # last marker, so the final chapter was silently lost.
        if chapter_text:
            path_str = settings.BASE_PATH + '/chapter-' + str(chapter_cnt)
            with open(path_str, 'wb') as file_out:
                file_out.write(chapter_text)

    # Segment every chapter into words.
    def perform_segmentation(self):
        """Run word segmentation on every ``.txt`` file found under
        ``settings.BOOK_PATH``.

        NOTE(review): ``settings.BOOK_PATH`` is opened as a single file in
        divide_into_chapter() but treated as a directory here -- confirm
        which is intended.
        """
        file_paths = utils.get_all_file_paths(settings.BOOK_PATH, ex=['.txt'])
        all_len = len(file_paths)
        print(all_len)
        for i, file_path in enumerate(file_paths):
            utils.get_process(i, all_len)  # progress indicator
            file_name = os.path.splitext(os.path.basename(file_path))[0]
            with open(file_path, 'rb') as file_in:
                self.divide_into_words(file_in, file_name)

    # Tokenise one document and emit parallel word / POS-flag files.
    @staticmethod
    def divide_into_words(document, doc_id):
        """Segment *document* (a binary file object) line by line with jieba
        and write two parallel files: ``words/word-<doc_id>.txt`` (the words,
        space-separated) and ``words/flag-<doc_id>.txt`` (their POS flags).
        """
        print(doc_id, document)
        path_str_word = '%s/words/word-%s.txt' % (settings.BASE_PATH, doc_id)
        path_str_flag = '%s/words/flag-%s.txt' % (settings.BASE_PATH, doc_id)

        with open(path_str_word, 'wb') as file_out_word, \
                open(path_str_flag, 'wb') as file_out_flag:
            line = utils.get_str(document.readline(), decode_code)
            while line:
                # PERF/BUGFIX: segment each line ONCE (the original ran
                # jieba.posseg.cut twice per line, doubling the expensive step).
                pairs = list(jieba.posseg.cut(line))
                words_word = " ".join(p.word for p in pairs)
                words_flag = " ".join(p.flag for p in pairs)
                # BUGFIX: word lines were written without a trailing newline
                # while flag lines got one, so consecutive word lines could
                # run together.  Downstream count_words splits on whitespace,
                # so the extra newline is harmless there.
                file_out_word.write(('%s\n' % words_word).encode(encode_code))
                file_out_flag.write(('%s\n' % words_flag).encode(encode_code))
                line = utils.get_str(document.readline(), decode_code)

    # Word-frequency statistics for every segmented document.
    def perform_wordcount(self):
        """Run count_words over every ``.txt`` file in ``BASE_PATH/words``."""
        file_paths = utils.get_all_file_paths('%s/words' % settings.BASE_PATH, ex=['.txt'])
        for file_path in file_paths:
            file_name = os.path.splitext(os.path.basename(file_path))[0]
            with open(file_path, 'rb') as file_in:
                self.count_words(file_in.read(), file_name)

    # Strip punctuation, then count word frequencies.
    def count_words(self, document, doc_id):
        """Count word frequencies in *document* (bytes) and write
        ``word<TAB>count`` lines, sorted by descending count, to
        ``BASE_PATH/wordcount/<doc_id>.txt``.
        """
        line = utils.get_str(document, decode_code)

        # BUGFIX: str.translate(string.punctuation) is wrong on Python 3 --
        # a plain str is used as an ord->char lookup table, so control
        # characters (ordinals below 32, e.g. '\t') were REPLACED by
        # punctuation instead of punctuation being removed.  Build an
        # explicit deletion table instead.
        line = line.translate(str.maketrans('', '', string.punctuation))
        line = "".join(line.split('\n'))  # drop newlines
        line = self.sub_replace(line)     # drop CJK punctuation / symbols

        result_dict = {}
        for word in line.split():
            result_dict[word] = result_dict.get(word, 0) + 1

        path_str = '%s/wordcount/%s.txt' % (settings.BASE_PATH, doc_id)
        with open(path_str, 'wb') as file_out:
            # Most frequent words first.
            sorted_result = sorted(result_dict.items(), key=lambda d: d[1], reverse=True)
            for word, cnt in sorted_result:
                file_out.write(('%s\t%s\n' % (word, cnt)).encode(encode_code))

    @staticmethod
    def sub_replace(line):
        """Return *line* with everything removed except CJK ideographs,
        ASCII alphanumerics and whitespace."""
        return TextProcesser._KEEP_RE.sub('', line)


if __name__ == '__main__':
    processer = TextProcesser()
    # Chapter splitting is disabled by default; re-enable when starting from
    # a raw, unsplit book file.
    # processer.divide_into_chapter()
    processer.perform_segmentation()
    processer.perform_wordcount()
