#! coding:utf-8

import os
import codecs
import jieba
import jieba.posseg as psg


class Segmenter(object):
    """
    Thin convenience wrapper around the jieba segmentation toolkit.

    Optionally loads a user-defined dictionary at construction time so that
    domain-specific terms are recognized by subsequent segmentation calls.
    """

    def __init__(self, user_dict=None):
        """
        :param user_dict: optional path to a jieba user-dictionary file;
                          silently ignored when missing or not a regular file.
        """
        if user_dict and os.path.isfile(user_dict):
            jieba.load_userdict(user_dict)
            self._user_dict = user_dict
        else:
            self._user_dict = None

    def segment_sentence(self, sentence):
        """
        Segment a single sentence into words, dropping whitespace-only tokens.

        :param sentence: sentence to segment
        :return: list of words, or None when the sentence is blank
        """
        sentence = sentence.strip()
        if not sentence:
            return None
        return [word for word in jieba.cut(sentence) if word.strip()]

    def segment_file(self, targetfile, resultfile, encoding='utf-8'):
        """
        Segment a file line by line (one sentence per line) and write the
        space-joined tokens of each non-blank line to the result file.

        :param targetfile: path of the file to segment
        :param resultfile: path the segmented output is written to
        :param encoding: text encoding used for both files
        :return: None
        """
        # Open the input first: this way a missing/unreadable target file
        # raises before the (possibly pre-existing) result file is truncated.
        with codecs.open(targetfile, 'r', encoding) as fr:
            with codecs.open(resultfile, 'w', encoding) as fw:
                # Iterate the file object directly instead of readlines()
                # so arbitrarily large files stream in constant memory.
                for line in fr:
                    words = self.segment_sentence(line)
                    if words:
                        fw.write(' '.join(words) + '\n')

    def seg_and_tag_sentence(self, sentence):
        """
        Segment a single sentence and POS-tag each word, dropping
        whitespace-only tokens.

        :param sentence: sentence to segment
        :return: list of (word, tag) pairs, or None when the sentence is blank
        """
        sentence = sentence.strip()
        if not sentence:
            return None
        return [(word, tag) for word, tag in psg.cut(sentence) if word.strip()]

    def remove_dict_words(self, wordlist):
        """
        Suppress words from the loaded user dictionary (the dictionary file
        itself is untouched; the words simply stop being produced by
        subsequent segmentation calls).

        :param wordlist: a single word (str) or a list/tuple of words
        :return: None; returns immediately when no user dictionary was loaded
        :raises TypeError: when wordlist is not a str, list or tuple
        """
        if not self._user_dict:
            return None
        if not isinstance(wordlist, (str, list, tuple)):
            raise TypeError('wordlist should be a str, list or tuple.')
        if isinstance(wordlist, str):
            jieba.del_word(wordlist)
        else:
            for word in wordlist:
                jieba.del_word(word)


if __name__ == '__main__':
    # Ad-hoc demo: POS-tag a sample sentence using jieba's default dictionary.
    sen = '患者鼻梁内陷，人中有点红。'
    tagged = list(psg.cut(sen))
    print(tagged)
    print(0)
