# -*- coding:utf-8 -*-
import codecs
import logging
import os
from django.core.management import BaseCommand
from common_module.pub_utils import is_chinese, is_number
from common_module.source_handles.kaoala_source_datafeici import source_seach_char
from kao_media.models import resources, material_manager, subject_desc
from kao_question.models import kao_answer, kao_question,kao_exam,kao_exam_area
from search.models import words_storehouse
log = logging.getLogger('django_crontab.crontab')
from common_module.path_handle import source_path_handle, source_file_copy,kaoala_jieba_dict, reception_base_path, \
    reception_user_search_data, base_path
import pandas as pd
import jieba.posseg as posseg

__author__ = 'root'

# POS flags (jieba/ICTCLAS tag set) whose words must NOT be added to the word dict.
# Stored as a frozenset: this collection is only ever used for `in` membership
# tests inside the per-token segmentation loops, where O(1) lookup matters.
exclude_part_of_speech = frozenset([
    'v', 'vd', 'vn', 'vshi', 'vyou', 'vf', 'vx', 'vi', 'vl', 'vg',  # verbs
    'ad', 'an', 'ag', 'al', 'a',  # adjectives
    'd',   # adverbs
    'u', 'uj', 'ud', 'ug', 'ul', 'uv', 'uz',  # auxiliary words
    'p',   # prepositions
    'c',   # conjunctions
    'y',   # modal particles
    'e',   # interjections
    'o',   # onomatopoeia
    'h',   # prefix-like elements
    'k',   # suffix-like elements
    'w',   # punctuation
    'b', 'bl',  # distinguishing words
    'r', 'rg',  # pronouns
    'mq',  # numeral + classifier compounds
])

def write_word_to_dict(kaola_dict_path=None):
    '''
    2016-09-26 add by jack
    Rebuild the jieba word dict file from the segmented text of all resources
    (questions, answers, video descriptions, extra resources, exam years/areas).
    Existing dict entries are kept; newly discovered words are appended with
    frequency 1 and their POS flag.
    :param kaola_dict_path: path of the jieba user dict file (read, then rewritten)
    :return: None
    '''
    question_qbody_dict={}
    question_qhins_dict={}
    question_pic_dcit={}     # NOTE(review): filled below but never used afterwards

    answer_answerText_dcit={}
    answer_corrent_dict={}

    video_desc_dict={}

    video_dict={}            # NOTE(review): filled below but never used afterwards

    resources_tags_dict={}
    resources_title_dict={}

    all_words_list=[]        # [word, freq, pos] records, kept in file order
    old_words_set=set()      # words already in the dict file, for O(1) membership tests
    with codecs.open(kaola_dict_path,mode='r',encoding='utf-8') as f:
        for line in f:       # stream the file instead of materializing readlines()
            line_list=line.split()
            if len(line_list)<3:   # a valid record is: word freq pos
                continue
            old_words_set.add(line_list[0])
            all_words_list.append([line_list[0],line_list[1],line_list[2]])


    log.info(u'start 提取题目数据')
    question_list=kao_question.objects.all()
    for sub_question in question_list:
        qbody=sub_question.qbody
        if qbody!=None and qbody!='':
            question_qbody_dict[int(sub_question.id)]=qbody
        qhins=sub_question.qhins
        if qhins!=None and qhins!='':
            question_qhins_dict[int(sub_question.id)]=qhins

        if sub_question.pic!=None and sub_question.pic!='':
            question_pic_dcit[int(sub_question.id)]=sub_question.pic
    log.info(u'end 提取题目数据')

    log.info(u'start 提取答案数据')
    answer_list=kao_answer.objects.all()
    for sub_answer in answer_list:
        if sub_answer.answerText!=None and sub_answer.answerText!='':
            answer_answerText_dcit[int(sub_answer.id)]=sub_answer.answerText
        if sub_answer.corrent_answer!=None and sub_answer.corrent_answer!='':
            answer_corrent_dict[int(sub_answer.id)]=sub_answer.corrent_answer
    log.info(u'end 提取答案数据')

    log.info(u'start 提取视频描述数据')
    desc_list=subject_desc.objects.all()
    for sub_desc in desc_list:
        if sub_desc.video_conclusion!=None and sub_desc.video_conclusion!='':
            video_desc_dict[int(sub_desc.subject_id)]=sub_desc.video_conclusion

    log.info(u'end 提取视频描述数据')

    log.info(u'start 提取视频地址数据')
    video_list=material_manager.objects.filter(mtype=1,purpose=0)
    for sub_video in video_list:
        subjects=sub_video.subject.all()
        if subjects:   # a material with no linked subject used to raise IndexError here
            video_dict[int(subjects[0].id)]=sub_video.material_url
    log.info(u'end 提取视频地址数据')


    log.info(u'start 提取额外资源数据')
    res_list=resources.objects.all()
    for sub_res in res_list:
        resources_tags_dict[int(sub_res.id)]=u''.join(map(lambda  x:x['tag_name'],sub_res.resources_tags_set.values('tag_name')))
        resources_title_dict[int(sub_res.id)]=sub_res.resources_name
    log.info(u'end 提取额外资源数据')


    log.info(u'start 提取试卷年份数据')
    exam_list=kao_exam.objects.values('year_of_exam')
    exam_items=list(set(map(lambda x:x['year_of_exam'],exam_list)))
    exam_times_list=[]
    for sub_time in exam_items:
        exam_times_list.append(sub_time.strftime('%Y'))   # year as text, e.g. u'2016'
    log.info(u'end 提取试卷年份数据')

    log.info(u'start 提取试卷地区数据')
    exam_area_list=kao_exam_area.objects.values('province')
    exam_area_items = list(set(map(lambda x: x['province'], exam_area_list)))
    log.info(u'end 提取试卷地区数据')



    log.info(u'start 进行分词')
    # Python 2: dict.values() returns lists, so '+' concatenates them.
    all_text_list=question_qbody_dict.values()+question_qhins_dict.values()+answer_answerText_dcit.values()+answer_corrent_dict.values()+video_desc_dict.values()
    all_text=u''.join(all_text_list)
    items=source_seach_char(seach_text=all_text)
    all_word_list=[]
    if items['code']==0:
        all_word_list=list(set(items['data']))
    log.info(u'end 进行分词')

    log.info(u'start 额外资源分词')
    all_res_text=u''.join(resources_tags_dict.values())
    all_res_text+=u''.join(resources_title_dict.values())
    items=source_seach_char(seach_text=all_res_text)
    if items['code']==0:
        all_word_list+=list(set(items['data']))
    log.info(u'end 额外资源分词')

    log.info(u'start 试卷年份和地区的分词')
    all_word_list+=exam_times_list
    all_area_text=u''.join(exam_area_items)
    items = source_seach_char(seach_text=all_area_text)
    if items['code'] == 0:
        all_word_list += list(set(items['data']))

    log.info(u'end 试卷年份和地区的分词')



    log.info(u'start 根据所有资源的分词进行更新word dcit文件')
    for sub_items in all_word_list:
        for sub_word in posseg.cut(sub_items):
            if len(sub_word.word)<2:   # keep only words of 2+ characters
                continue
            if sub_word.flag in exclude_part_of_speech:   # skip excluded POS flags
                continue
            if is_chinese(sub_word.word)==False and is_number(sub_word.word)==False:  # must be Chinese or numeric
                continue
            if sub_word.word not in old_words_set:   # set lookup (was an O(n) list scan)
                all_words_list.append([sub_word.word,u'1',sub_word.flag])   # append new word to the dict records
                old_words_set.add(sub_word.word)
    log.info(u'end 根据所有资源的分词进行更新word dcit文件')




    log.info(u'start 写入word dict')
    with codecs.open(kaola_dict_path,mode='w',encoding='utf-8') as f:
        for sub_word in all_words_list:
            sub_line=u' '.join(sub_word)+u'\n'
            f.write(sub_line)
    log.info(u'end 写入word dict')





def word_dict_sync_db(kaola_dict_path=None):
    '''
    分词文件和数据库进行同步
    2016-09-08 add by jack
    :param kaola_dict_path:
    :return:
    '''
    all_words_list=[]
    log.info(u'start 获取新的词汇')
    new_words_dict=handle_new_words()
    log.info(u'end 获取新的词汇')
    old_words_list=[]    #当前分词list
    with codecs.open(kaola_dict_path,mode='r',encoding='utf-8') as f:
        for line in f.readlines():
            one_line_words=[]   #一个分词记录
            line_list=line.split()
            if line_list.__len__()<3:
                continue
            one_line_words=[line_list[0],line_list[1],line_list[2]]
            old_words_list.append(line_list[0])
            all_words_list.append(one_line_words)
    for one_word in all_words_list:
        word=one_word[0]
        count=one_word[1]
        property=one_word[2]
        words_list=words_storehouse.objects.filter(word=word)   #获取word数据库记录
        if words_list.exists()==True:
            one_words_obj=words_list[0]
            try:
                count_int=int(count)
            except Exception,e:
                count_int=1
            if one_words_obj.count<count_int:
                one_words_obj.count=count_int
                one_words_obj.save()
            elif one_words_obj.count>count_int:
                one_word[1]=unicode(one_words_obj.count)
        else:
            try:
                count_int=int(count)
            except Exception,e:
                count_int=1
            try:
                words_storehouse.objects.create(word=word,count=count_int,word_property=property)
            except Exception,e:
                log.error(e)
                log.warning(u'创建分词记录失败')

    for key,value in new_words_dict.items():
        if key not in old_words_list:
            try:
                words_storehouse.objects.create(word=key,count=1,word_property=value)
                all_words_list.append([key,u'1',value])
            except Exception,e:
                log.error(e)
                log.warning(u'添加性词到数据表失败')
    log.info(u'start 写入word dict')
    with codecs.open(kaola_dict_path,mode='w',encoding='utf-8') as f:
        for sub_word in all_words_list:
            sub_line=u' '.join(sub_word)+u'\n'
            f.write(sub_line)
    log.info(u'end 写入word dict')


def handle_new_words():
    '''
    Segment the terms collected from user searches (new_words.csv) and return
    a {word: pos_flag} dict of candidate words: Chinese, at least two
    characters long, and whose POS flag is not in exclude_part_of_speech.
    '''
    csv_path = os.path.join(reception_user_search_data, 'new_words.csv')
    search_frame = pd.read_csv(csv_path, encoding='utf-8')
    unique_terms = set(search_frame.word.values)   # de-duplicate raw search terms
    collected = {}
    for term in unique_terms:
        for token in posseg.cut(term):
            # keep only 2+ character Chinese words with an allowed POS flag
            keep = (len(token.word) >= 2
                    and token.flag not in exclude_part_of_speech
                    and is_chinese(token.word) != False)
            if keep:
                collected[token.word] = token.flag
    return collected




class Command(BaseCommand):
    '''Cron entry point: rebuild the jieba word dict file, then sync it to the DB.'''

    def handle(self, *args, **options):
        # Ordered pipeline: the dict file must be rebuilt before it is synced.
        steps = (
            (u'start 更新word dict 文件', u'end 更新word dict 文件', write_word_to_dict),
            (u'start 数据库分词词库记录同步', u'end 数据库分词词库记录同步', word_dict_sync_db),
        )
        for begin_msg, end_msg, step in steps:
            log.info(begin_msg)
            step(kaola_dict_path=kaoala_jieba_dict)
            log.info(end_msg)