# -*- coding: utf-8 -*-
'''
Created on 2016年12月19日

@author: ZhuJiahui
'''

import os
import jieba as jb
import jieba.posseg as jbp
import nltk
import re
from file_utils.file_reader import read_to_1d_list, read_to_1d_list_gbk
from global_info.global_nlp import GlobalNLP


def get_stopwords():
    """Load the Chinese and English stopword lists and merge them into one set.

    Reads six GBK-encoded stopword files from ``GlobalNLP.STOPWORDS_DIR``
    and returns the union of all their entries.

    :return: set of stopword strings (Chinese and English mixed)
    """
    filenames = [
        "mark_stop.txt",          # Chinese punctuation stopwords
        "waste_content.txt",      # Chinese boilerplate / noise phrases
        "cn_stopwords.txt",       # general Chinese stopwords
        "englishword.txt",        # English single characters / words
        "english_mark_stop.txt",  # English punctuation stopwords
        "english_stopwords.txt",  # general English stopwords
    ]
    stopwords = set()
    for name in filenames:
        stopwords.update(read_to_1d_list_gbk(GlobalNLP.STOPWORDS_DIR + "/" + name))
    return stopwords


def cn_word_segment(sentence):
    """Segment a Chinese sentence and POS-tag every token.

    :param sentence: input string
    :return: list of "word<delim>flag" strings, where <delim> is
             ``GlobalNLP.CN_WORD_INNER_DELIMITER``
    """
    # jb.load_userdict("user_dict.txt")

    delimiter = GlobalNLP.CN_WORD_INNER_DELIMITER
    # jbp.cut yields pair objects carrying .word and .flag (the POS tag)
    return [
        token.word.strip() + delimiter + token.flag.strip()
        for token in jbp.cut(sentence)
    ]


def cn_filtered_word_segment(sentence, stopwords_list):
    """Segment a Chinese sentence, POS-tag every token, and drop stopwords.

    Special characters (_ ~ = / + - * | \\) are replaced with spaces before
    segmentation; tokens whose POS flag is "x" (non-word symbols) and tokens
    found in *stopwords_list* are discarded.

    :param sentence: input string
    :param stopwords_list: collection of stopwords to filter out
    :return: list of "word<delim>flag" strings, where <delim> is
             ``GlobalNLP.CN_WORD_INNER_DELIMITER``
    """
    # jb.load_userdict("user_dict.txt")
    segment = jbp.cut(re.sub(r'[_~=/\+\-\*\|\\]', " ", sentence))  # POS-tagged tokens
    result_list = []
    for item in segment:
        word = item.word.strip()
        flag = item.flag.strip()
        # BUGFIX: test the *stripped* word against the stopword list — the
        # original checked the raw token, so a whitespace-padded stopword
        # slipped through even though its stripped form was what got emitted.
        # Also skip tokens that are empty after stripping.
        if word and (word not in stopwords_list) and flag != "x":
            result_list.append(word + GlobalNLP.CN_WORD_INNER_DELIMITER + flag)

    return result_list


def en_word_segment(sentence):
    """Tokenize an English sentence and POS-tag every token.

    Special characters (_ ~ = / + - * | \\) are replaced with spaces
    before tokenization.

    :param sentence: input string
    :return: list of "word<delim>tag" strings with the word lower-cased,
             where <delim> is ``GlobalNLP.EN_WORD_INNER_DELIMITER``
    """
    cleaned = re.sub(r'[_~=/\+\-\*\|\\]', " ", sentence)
    tagged = nltk.pos_tag(nltk.word_tokenize(cleaned))  # (word, tag) pairs

    delimiter = GlobalNLP.EN_WORD_INNER_DELIMITER
    return [word.lower() + delimiter + tag for word, tag in tagged]


def en_filtered_word_segment(sentence, stopwords_list):
    """Tokenize an English sentence, POS-tag it, and drop stopwords.

    Special characters (_ ~ = / + - * | \\) are replaced with spaces before
    tokenization; tokens whose POS tag is punctuation-like (":", "''", "``")
    and tokens whose lower-cased form appears in *stopwords_list* are
    discarded.

    :param sentence: input string
    :param stopwords_list: collection of stopwords to filter out
    :return: list of "word<delim>tag" strings with the word lower-cased,
             where <delim> is ``GlobalNLP.EN_WORD_INNER_DELIMITER``
    """
    cleaned = re.sub(r'[_~=/\+\-\*\|\\]', " ", sentence)
    tagged = nltk.pos_tag(nltk.word_tokenize(cleaned))  # (word, tag) pairs

    waste_pos = (":", "\'\'", "``")  # punctuation-like POS tags to discard
    delimiter = GlobalNLP.EN_WORD_INNER_DELIMITER

    result_list = []
    for word, tag in tagged:
        lowered = word.lower()
        if lowered not in stopwords_list and tag not in waste_pos:
            result_list.append(lowered + delimiter + tag)

    return result_list


if __name__ == '__main__':
    # Quick smoke test: segment a classic ambiguous Chinese phrase while
    # filtering out one stopword.
    # print(cn_word_segment("南京市长江大桥"))
    demo_stopwords = ["啊"]
    print(cn_filtered_word_segment("南京市长江大桥", demo_stopwords))