# -*- coding:utf-8 -*-
__author__ = 'root'
import os
import jieba
import codecs
import jieba.posseg as posseg
import pandas as pd
from pandas import DataFrame
from kaoala_reception_handle import word_dcit,material_handle
from common_module.path_handle import reception_base_path,kaoala_jieba_dict,reception_user_search_data
from common_module.pub_utils import *
from search.models import *









def __update_cihui(char_path=None,*args):
    '''
    Classify the segmented words into existing words, new words and
    co-occurring words, then dispatch each group to its handler.

    :param char_path: path to the jieba dictionary file; each valid entry
                      has at least 3 whitespace-separated fields, the first
                      being the word itself
    :param args: words produced by the segmentation step
    :return: True on success, False when the dictionary file is missing
    '''
    simultaneously_word=[]   # words that appeared together in one search
    if char_path is None or not os.path.isfile(char_path):
        return False
    old_words_list=[]   # words already present in the dictionary file
    with codecs.open(char_path,mode='r',encoding='utf-8') as f:
        # iterate the file lazily instead of materializing readlines()
        for line in f:
            line_list=line.split()
            if len(line_list)<3:   # skip malformed entries (word/freq/pos expected)
                continue
            old_words_list.append(line_list[0])

    word_list=[]
    for sub_key in args:
        if len(sub_key)<2:  # keep only words of at least 2 characters
            continue
        if not is_chinese(sub_key):  # keep only Chinese words
            continue
        simultaneously_word.append(sub_key)
        if sub_key in exclude_part_of_speech:   # excluded entries are not added to the dictionary
            continue
        word_list.append(sub_key)
    old_words=[]   # words already known
    new_words=[]   # words seen for the first time
    known_words=set(old_words_list)   # O(1) membership instead of O(n) list scans
    for sub_key in set(word_list):
        if sub_key in known_words:
            old_words.append(sub_key)
        else:
            new_words.append(sub_key)
    __cihui_totalizer(*old_words)
    __add_cihui(*new_words)
    __simultaneously_words(*simultaneously_word)
    return True


def __cihui_totalizer(*args):
    '''
    Increment the search counter of words that already exist in the
    words_storehouse table. Do not pass brand-new words here — unknown
    words are silently skipped.

    :param args: existing words
    :return: None
    '''
    # Iterate args directly; the original intermediate copy list added nothing.
    for sub_word in args:
        word_obj=words_storehouse.objects.filter(word=sub_word)
        if word_obj.exists():   # only count words already stored
            one_word_obj=word_obj[0]
            one_word_obj.count=one_word_obj.count+1
            one_word_obj.save()

def __add_cihui(*args):
    '''
    Persist new words by appending them to new_words.csv under the
    user-search data directory.

    :param args: new words (non-string or empty entries are ignored)
    :return: True on success, False for invalid path or empty input
    '''
    colunms=['word']  # column name of the csv file
    add_char_path=os.path.join(reception_user_search_data,'new_words.csv')
    if not isinstance(add_char_path,basestring):
        return False
    if len(args)<1:
        return False

    # keep only non-empty string words
    new_words=[sub_key for sub_key in args
               if isinstance(sub_key,basestring) and sub_key!='']
    # Bug fix: ensure the data directory exists before writing, consistent
    # with __simultaneously_words; otherwise to_csv fails on first run.
    if not os.path.exists(reception_user_search_data):
        os.makedirs(reception_user_search_data)
    if os.path.isfile(add_char_path):
        add_ward_dataframe=pd.read_csv(add_char_path,encoding='utf-8')  # load existing data as a DataFrame
        for sub_key in new_words:
            add_ward_dataframe.loc[len(add_ward_dataframe)]=[sub_key]
    else:
        add_ward_dataframe=DataFrame(data={'word':new_words},columns=colunms)
    add_ward_dataframe.to_csv(add_char_path,encoding='utf-8',index=False)
    return True



def __simultaneously_words(*args):
    '''
    Store words that occurred together in one search as a single
    semicolon-joined row of simultaneously_words.csv.

    :param args: co-occurring words
    :return: True on success, False for empty input
    '''
    if len(args)<1:
        return False
    simultaneously_path=os.path.join(reception_user_search_data,'simultaneously_words.csv')
    colunms=['words']  # column name of the csv file
    joined_row=';'.join(args)   # one csv row per search

    if not os.path.exists(reception_user_search_data):
        os.makedirs(reception_user_search_data)
    if os.path.isfile(simultaneously_path):
        # append the new row to the existing file content
        ward_dataframe=pd.read_csv(simultaneously_path,encoding='utf-8')
        ward_dataframe.loc[len(ward_dataframe)]=[joined_row]
    else:
        ward_dataframe=DataFrame(data={'words':[joined_row]},columns=colunms)
    ward_dataframe.to_csv(simultaneously_path,encoding='utf-8',index=False)
    return True





def __seach_char(seach_text=None):
    '''
    Segment the search text with jieba's search-engine mode and update the
    word statistics with the resulting tokens.

    :param seach_text: text to segment
    :return: dict with keys 'code' (0 success / 1 failure), 'msg' (prompt)
             and 'data' (list of tokens, with the original text appended)
    '''
    char_path=kaoala_jieba_dict    # path of the user dictionary
    items={}
    items['code']=1  # 0 means segmentation succeeded, 1 means it failed
    items['msg']=''   # prompt message
    items['data']=[]   # tokens after segmentation
    if not isinstance(seach_text,basestring) or seach_text=='':
        items['msg']=u'需分词的文本错误'
        return items
    # Strip symbol characters; join is linear, the original += loop was
    # quadratic. `!= True` preserved: is_other's return type is not
    # verifiable here — TODO confirm it returns a bool.
    new_str=u''.join(sub_key for sub_key in seach_text
                     if is_other(sub_key)!=True)
    jieba.load_userdict(char_path)    # load the segmentation dictionary
    items['data']=jieba.lcut_for_search(new_str)
    items['data']+=[seach_text]
    __update_cihui(char_path,*items['data'])
    items['code']=0
    items['msg']=u'分词成功'
    return items



def kaoala_search_main(search_text=None,flag=None):
    '''
    Entry point of the whole front-end search system.

    :param search_text: text to search for
    :param flag: forwarded to material_handle_main as a keyword option
    :return: search result dict; empty dict when segmentation or word
             filtering reports a non-zero code
    '''
    results={}
    segmented=__seach_char(search_text)
    # Both handler objects are constructed up front, matching the original
    # call order, before any code checks.
    dict_handler=word_dcit()
    material_handler=material_handle()
    if segmented['code']==0:
        filtered=dict_handler.filter_words(*segmented['data'])
        if filtered['code']==0:
            results=material_handler.material_handle_main(
                *(filtered['data'].keys()),flag=flag)
    return results





# if __name__ == '__main__':
#     main(u"""中国 印度的海洋季风气候""")

