# -*- coding:utf-8 -*-
__author__ = 'root'
import codecs

import pandas as pd
from pandas import DataFrame
import os
from common_module.source_handles.kaoala_source_datafeici import source_seach_char
from common_module.path_handle import current_source_path


class word_dict:
    """Loads a word-segmentation vocabulary from a raw dictionary file and
    persists it as a CSV (word_dict.csv) for the other modules to read.
    """

    def __init__(self, word_dict_path=None):
        # Target CSV consumed by the rest of the pipeline.
        self.__word_dict_pd_name = os.path.join(current_source_path, 'word_dict.csv')
        self.__word_dict_path = word_dict_path   # raw segmentation-vocabulary file
        self.words = []   # loaded vocabulary words
        self.__columns = ['word']
        self.__get_words()

    def __get_words(self):
        '''
        Load the vocabulary words from the raw dictionary file.

        Each line is whitespace-split; lines with fewer than 3 fields are
        skipped, and the first field of every remaining line is kept.
        :return: None (populates self.words)
        '''
        # Iterate the file object directly instead of readlines() so the
        # whole file is never held in memory at once.
        with codecs.open(self.__word_dict_path, mode='r', encoding='utf-8') as f:
            for line in f:
                fields = line.split()
                # len() instead of the non-idiomatic __len__() dunder call.
                if len(fields) < 3:
                    continue
                self.words.append(fields[0])

    def writ_word_dict(self):
        '''
        Persist the loaded vocabulary to word_dict.csv.
        :return: None
        '''
        data = {'word': self.words}
        word_dataframe = DataFrame(data=data, columns=self.__columns)
        word_dataframe.to_csv(self.__word_dict_pd_name, encoding='utf-8', index=False)


class word_word_proportion:
    """Builds the word-to-word weighting table (word_word_proportion.csv)
    from the persisted vocabulary CSV.
    """

    def __init__(self):
        self.__word_dict_pd_name = os.path.join(current_source_path, 'word_dict.csv')
        # Word-to-word weight relations file.
        self.__proportion_file_path = os.path.join(current_source_path, 'word_word_proportion.csv')
        self.__columns = [
            'word_id',
            'proportion_1',
            'proportion_2',
            'proportion_3',
            'proportion_4',
            'proportion_5',
            'proportion_6',
        ]

    def wird_proportion(self):
        '''
        Persist the word-to-word weight table.

        Only word_id and proportion_6 are populated; the remaining
        proportion_* columns are written as empty (NaN) placeholders.
        :return: None
        '''
        vocabulary = pd.read_csv(self.__word_dict_pd_name, encoding='utf-8')
        index_values = vocabulary.index.values
        table = DataFrame(
            data={
                'word_id': index_values,
                'proportion_6': [str(value) for value in index_values],
            },
            columns=self.__columns,
        )
        table.to_csv(self.__proportion_file_path, encoding='utf-8', index=False)




class handle_material:
    """Maintains the word -> material-weight mapping tables.

    NOTE(review): this module targets Python 2 (``basestring`` below);
    keep that in mind before running it under Python 3.
    """

    def __init__(self):
        self.__word_dict_pd_name = os.path.join(current_source_path, 'word_dict.csv')
        # Vocabulary -> material-weight mapping file.
        self.__word_material_proportion_file_path = os.path.join(current_source_path, 'word_material_proportion.csv')
        # File names of the weight -> material mapping tables.
        self.__proportion_material_file_path = ['weights_1.csv', 'weights_2.csv', 'weights_3.csv',
                                                'weights_4.csv', 'weights_5.csv', 'weights_6.csv']
        # Columns of the vocabulary -> material-weight mapping file.
        self.__word_material_proportion_colunms = ['word_id', 'weights_1', 'weights_2', 'weights_3',
                                                   'weights_4', 'weights_5', 'weights_6']
        # Columns of the weight -> material mapping tables.
        self.__proportion_material_colunms = ['father_id', 'material_id', 'p_category']
        self.__write_word_material_proportion()

    def __write_word_material_proportion(self):
        '''
        Persist the vocabulary -> material-weight mapping table.

        Every word row points at the six fixed weight-table file names.
        :return: None
        '''
        word_makes = pd.read_csv(self.__word_dict_pd_name, encoding='utf-8')
        word_index = word_makes.index.values

        # Reuse the canonical file-name list instead of re-typing the
        # 'weights_N.csv' literals so the two stay consistent.
        data = {'word_id': word_index}
        for column_name, table_name in zip(self.__word_material_proportion_colunms[1:],
                                           self.__proportion_material_file_path):
            data[column_name] = [table_name for _ in word_index]
        makes = DataFrame(data=data, columns=self.__word_material_proportion_colunms)
        makes.to_csv(self.__word_material_proportion_file_path, encoding='utf-8', index=False)

    def __word_fenci(self, text=None, p_category=None):
        '''
        Segment the given text, internal helper.
        :param text: text to segment, type string
        :param p_category: material type, type int: 1 text question, 2 picture question,
                           3 hint question, 4 text answer, 5 picture answer,
                           6 video knowledge point, 7 interactive knowledge point
        :return: dict with code (0 success / 1 failure), data (word list),
                 p_type, p_category and msg
        '''
        items = {'code': 1, 'data': [], 'p_type': None, 'p_category': None, 'msg': u''}
        # 'not isinstance(...)' instead of comparing against False.
        if not isinstance(text, basestring) or text == '':
            items['msg'] = u'待分词文本为空'
            return items
        if not isinstance(p_category, int):
            items['msg'] = u'素材所属类错误'
            return items
        participle = source_seach_char(seach_text=text)
        if participle['code'] == 1:
            items['msg'] = participle['msg']
            return items
        items['code'] = 0
        items['data'] = participle['data']
        items['p_category'] = p_category
        items['msg'] = u'分词成功'
        return items

    def write_proportion_material(self, text=None, p_category=None, material_id=None):
        '''
        Record which material the text's words map to in the weight table.
        :param text: text to segment, type string
        :param p_category: material type, type int: 1 text question, 2 picture question,
                           3 hint question, 4 text answer, 5 picture answer,
                           6 video knowledge point, 7 interactive knowledge point
        :param material_id: material id, type int
        :return: None
        '''
        participle = self.__word_fenci(text=text, p_category=p_category)
        if participle['code'] == 1:
            # Segmentation failed: participle['data'] is empty, so every
            # filter below would match nothing anyway — skip the pointless
            # CSV reads and bail out early.
            return

        word_makes = pd.read_csv(self.__word_dict_pd_name, encoding='utf-8')
        # Segmented words that exist in the vocabulary.
        participle_dataframe = word_makes[word_makes['word'].isin(participle['data'])]
        participle_dataframe_index = participle_dataframe.index.values

        word_material_makes = pd.read_csv(self.__word_material_proportion_file_path, encoding='utf-8')
        word_material_dataframe = word_material_makes[word_material_makes['word_id'].isin(participle_dataframe_index)]

        word_material_dataframe_index = word_material_dataframe.index.values   # mapping-row indexes
        word_material_dataframe_word = word_material_dataframe.weights_6.values    # weight-table name per word
        if len(word_material_dataframe_index) > 0:   # some mapping rows matched
            # NOTE(review): only the first row's table name is used for every
            # matched word. All rows currently carry 'weights_6.csv', but any
            # row pointing elsewhere would be silently merged into the first
            # table — confirm this is intended.
            material_table = os.path.join(current_source_path, word_material_dataframe_word[0])

            if os.path.isfile(material_table):  # table already exists: append missing records
                weights_6_dataframe = pd.read_csv(material_table, encoding='utf-8')
                for sub_index in word_material_dataframe_index:
                    # Is there already a (father_id, material_id, p_category) record?
                    existing = weights_6_dataframe[weights_6_dataframe['father_id'].isin([sub_index])]
                    existing = existing[existing['material_id'].isin([material_id])]
                    existing = existing[existing['p_category'].isin([p_category])]
                    if len(existing.index.values) < 1:  # not recorded yet
                        weights_6_dataframe.loc[len(weights_6_dataframe)] = [sub_index, material_id, p_category]
            else:
                data = {'father_id': word_material_dataframe_index,
                        'material_id': [material_id for _ in word_material_dataframe_index],
                        'p_category': [p_category for _ in word_material_dataframe_index]}
                weights_6_dataframe = DataFrame(data=data, columns=self.__proportion_material_colunms)
            weights_6_dataframe.to_csv(material_table, encoding='utf-8', index=False)









class material_solidify:
    """Persists materials into the material-solidify table (material_solidify.csv)."""

    def __init__(self):
        # Material-solidify table file.
        self.__material_solidify_file_path = os.path.join(current_source_path, 'material_solidify.csv')
        self.__material_solidify_colunms = ['qtid', 'qcid', 'hid', 'atid', 'acid', 'mid', 'inid', 'pic']

    def write_material_solidify(self, material_id=None, p_category=None, pic_meida_path=''):
        '''
        Solidify a material into the table.
        :param material_id: material id, type int
        :param p_category: material type, type int: 1 text question, 2 picture question,
                           3 hint question, 4 text answer, 5 picture answer,
                           6 video knowledge point, 7 interactive knowledge point
        :param pic_meida_path: path of the picture or video
        :return: dict with code (0 success / 1 failure) and msg
        '''
        items = {'code': 1, 'msg': u''}
        if p_category not in [1, 2, 3, 4, 5, 6, 7]:
            items['msg'] = u'素材的类型错误'
            return items
        if not isinstance(material_id, int):
            items['msg'] = u'素材的id错误'
            return items
        material_id = str(material_id)   # stored and compared as a string

        # Row layout mirrors __material_solidify_colunms: the id goes into the
        # column selected by p_category, everything else stays empty except
        # the media path. Building the row once also fixes the original
        # new-file branch, which used a 'pcid' dict key where the column is
        # named 'qcid' and therefore dropped picture-question ids.
        one_data = ['', '', '', '', '', '', '', pic_meida_path]
        one_data[p_category - 1] = material_id

        if os.path.isfile(self.__material_solidify_file_path):  # table already exists
            # Read everything back as strings (keeping '' as '') so the
            # duplicate check compares like with like; a plain read_csv parses
            # stored ids as numbers, str(material_id) never matched them, and
            # every call appended a duplicate row.
            solidify_makes = pd.read_csv(self.__material_solidify_file_path, encoding='utf-8',
                                         dtype=str, keep_default_na=False)
            column = self.__material_solidify_colunms[p_category - 1]
            duplicates = solidify_makes[solidify_makes[column].isin([material_id])]
            if len(duplicates.index.values) < 1:  # not recorded yet
                solidify_makes.loc[len(solidify_makes)] = one_data
        else:
            solidify_makes = DataFrame(data=[one_data], columns=self.__material_solidify_colunms)
        solidify_makes.to_csv(self.__material_solidify_file_path, encoding='utf-8', index=False)
        # Report success explicitly, consistent with handle_material's
        # helpers (the original fell off the end and returned None).
        items['code'] = 0
        items['msg'] = u'素材固化成功'
        return items



