import os
import time

from dataPath import resultdirBasepath,searchRecordspath
from stopWordDic import inStopwordDic
import jieba
#import pkuseg
#from pyhanlp import *
import thulac

# Quiet jieba's default log output down to INFO level.
jieba.setLogLevel(jieba.logging.INFO)
# Module-level THULAC segmenter in segmentation-only mode (seg_only=True, no
# POS tagging); created once here so the model is loaded a single time and
# shared by all FenciForComp instances.
thu1 = thulac.thulac(seg_only=True)
class FenciForComp():
    """Extract competitor keywords from search-record lines via word segmentation.

    Each line is segmented with jieba.  Whenever a mediator keyword (a key of
    ``midkeydic``) occurs in the segmented line, every other segment is treated
    as a competitor-keyword candidate and counted, weighted by that mediator
    keyword's frequency.
    """

    def __init__(self, midkeydic):
        # Keep the mediator-keyword table as an attribute so it does not have
        # to be passed into every call (one segmentation pass serves all keys).
        self.midkeydic = midkeydic
        # Accumulates competitor keyword -> weighted occurrence count.
        self.compdic = {}
        # Buffers filled by thulacfenci: segmented text and the matching raw
        # lines.  The original code used these without ever initializing them,
        # which raised AttributeError on the first thulacfenci call.
        self.fenci = []
        self.results1 = []

    def jiebafenci(self, j, s):
        """Segment one search-record line and accumulate competitor keywords.

        j: one line from the search-record file.
        s: the seed keyword; never counted as a competitor.
        Mediator keywords come from ``self.midkeydic`` (key -> frequency,
        the frequency is used as the weight).
        """
        segments = jieba.cut(j, cut_all=False, HMM=False)
        joined = " ".join(segments)
        # Drop stop words (function words, conjunctions, ...) once, before the
        # mediator loop.  The original removed items from the list *while
        # iterating it*, which silently skipped the element that followed
        # every removed stop word.
        complist = [w for w in joined.split(" ") if not inStopwordDic(w)]
        # k = mediator keyword, v = its frequency (weight for the count).
        for k, v in self.midkeydic.items():
            if k not in joined:
                continue
            for i in complist:
                # A competitor must differ from both the seed keyword and the
                # current mediator keyword.
                # NOTE(review): only the *current* mediator keyword is
                # excluded here; the original author left open whether all
                # mediator keywords should be excluded instead — confirm.
                if i == s or i == k:
                    continue
                if k not in i:
                    # Plain competitor keyword: add the weighted count.
                    self.compdic[i] = self.compdic.get(i, 0) + 1 * v
                else:
                    # The segment contains the mediator keyword, e.g. segment
                    # "湖北省" contains mediator "省"; strip the mediator from
                    # the ends to recover the competitor ("湖北").
                    # The original used i.strip(k), which treats k as a
                    # *character set* rather than a substring and corrupts
                    # results for multi-character mediator keywords.
                    newCompKey = i
                    if newCompKey.startswith(k):
                        newCompKey = newCompKey[len(k):]
                    if newCompKey.endswith(k):
                        newCompKey = newCompKey[:-len(k)]
                    # Guard: stripping may leave an empty string.
                    if newCompKey:
                        self.compdic[newCompKey] = (
                            self.compdic.get(newCompKey, 0) + 1 * v
                        )

    def thulacfenci(self, j, k):
        """Segment line ``j`` with THULAC; record it when keyword ``k`` occurs.

        Appends the segmented text to ``self.fenci`` and the raw line to
        ``self.results1``.
        """
        text1 = thu1.cut(j, text=True)
        if k in text1:
            self.fenci.append(text1)
            self.results1.append(j)

    def outputComp(self, keyWord):
        """Write competitor keywords for ``keyWord``, sorted by weight desc.

        Output file: <resultdirBasepath>/<keyWord>\\compKey.txt, one
        "keyword count" pair per line.
        """
        # Context manager guarantees the file is closed even on error
        # (the original leaked the handle if a write raised).
        with open(resultdirBasepath + keyWord + "\\compKey.txt", 'w',
                  encoding='utf-8') as newfile:
            # sorted() already yields (key, value) pairs in order; the
            # original's dict(...).items() round-trip was redundant and the
            # local name shadowed the builtin `list`.
            for key, value in sorted(self.compdic.items(),
                                     key=lambda x: x[1], reverse=True):
                newfile.write(key + " " + str(value) + '\n')
        print("--" + keyWord + "--竞争关键字关键词结果已完成写入")

