# -*- coding:utf-8 -*-
from common_module.path_handle import base_path,kaoala_jieba_dict

__author__ = 'root'
import os
import jieba
import codecs
from common_module.pub_utils import *







def source_seach_char(seach_text=None):
    '''
    Tokenize the given text with jieba's search-engine mode and keep only
    the tokens that also appear in the Koala user dictionary.

    :param seach_text: text to tokenize; must be a non-empty string
                       (str/unicode under Python 2)
    :return: dict with keys:
             'code' - 0 on success, 1 on failure
             'msg'  - status message (unicode)
             'data' - list of tokens that exist in the user dictionary,
                      in segmentation order (duplicates preserved)
    '''
    char_path = kaoala_jieba_dict  # path of the jieba user dictionary file
    items = {
        'code': 1,   # 0 = segmentation succeeded, 1 = failed
        'msg': '',   # status message
        'data': [],  # filtered tokens
    }

    # Reject non-string or empty input up front.
    if not isinstance(seach_text, basestring) or seach_text == '':
        items['msg'] = u'需分词的文本错误'
        return items

    # Drop symbol characters before segmentation (is_other() flags a single
    # character as punctuation/symbol). join() avoids quadratic concatenation.
    new_str = u''.join(ch for ch in seach_text if not is_other(ch))

    jieba.load_userdict(char_path)  # register the custom dictionary with jieba
    items['data'] = jieba.lcut_for_search(new_str)

    # Build the set of known words from the dictionary file. A set makes the
    # membership test below O(1) per token instead of O(n) against a list.
    # Lines with fewer than 3 whitespace-separated fields are skipped
    # (presumably word/frequency/POS-tag format — TODO confirm against the
    # actual dictionary file).
    known_words = set()
    with codecs.open(char_path, mode='r', encoding='utf-8') as f:
        for line in f:  # iterate lazily instead of readlines()
            fields = line.split()
            if len(fields) < 3:
                continue
            known_words.add(fields[0])

    # Keep only tokens present in the dictionary, preserving order.
    items['data'] = [word for word in items['data'] if word in known_words]
    items['code'] = 0
    items['msg'] = u'分词成功'
    return items
