import os
import time

from dataPath import resultdirBasepath,searchRecordspath
from stopWordDic import inStopwordDic
import jieba
#import pkuseg
#from pyhanlp import *
import thulac

# Quiet jieba's default startup/debug output down to INFO level.
jieba.setLogLevel(jieba.logging.INFO)
# Shared THULAC segmenter instance; seg_only=True skips POS tagging
# (loaded once at import time because model initialization is expensive).
thu1 = thulac.thulac(seg_only=True)
class SearchAndFenci():
    """Segment search-result sentences and collect "intermediate keywords".

    A sentence is kept only if it contains the seed keyword; every other
    surviving token is counted in ``self.dic`` as a candidate mid-keyword.
    Results are written per-keyword under ``resultdirBasepath``.
    """

    def __init__(self):
        # dic: intermediate keyword -> occurrence count
        self.dic = {}
        # sentences that contained the seed keyword
        self.results1 = []
        # raw segmentations, one space-joined string per sentence
        self.fenci = []
        # segmentations with stopwords filtered out
        self.fenciWithStopWordDic = []

    def jiebafenci(self, j, k):
        """Segment sentence ``j`` with jieba; if it contains seed ``k``,
        record the sentence/segmentation and count intermediate keywords.

        Parameters:
            j: sentence (str) to segment.
            k: seed keyword (str).
        """
        tokens = jieba.cut(j, cut_all=False, HMM=False)
        str1 = " ".join(tokens)
        if k not in str1:
            return
        kept = []  # tokens surviving the stopword filter
        for token in str1.split(" "):
            # Drop function words (conjunctions etc.) — not kept and not
            # counted as mid-keywords.
            # BUGFIX: the original removed items from the list while
            # iterating it, which skipped the element after each stopword.
            if inStopwordDic(token):
                continue
            kept.append(token)
            # The seed keyword itself is not a mid-keyword.
            if token == k:
                continue
            if k not in token:
                self.dic[token] = self.dic.get(token, 0) + 1
            else:
                # Token contains the seed (e.g. "湖南卫视" contains "湖南"):
                # count the remainder after removing the seed substring.
                # BUGFIX: the original used str.strip(k), which strips the
                # *characters* of k from both ends, not the substring.
                newMidKey = token.replace(k, "")
                if newMidKey:  # skip empty remainders
                    self.dic[newMidKey] = self.dic.get(newMidKey, 0) + 1
        self.results1.append(j)
        self.fenci.append(str1)
        self.fenciWithStopWordDic.append(" ".join(kept))
    """
    def pkusegfenci(j, k):
        seg = pkuseg.pkuseg()
        text = seg.cut(j)
        if k in text:
            fenci.append(" ".join(text))
            results1.append(j)
    """
    def thulacfenci(self, j, k):
        """Segment ``j`` with THULAC; record it if the segmented text
        contains the seed keyword ``k``."""
        text1 = thu1.cut(j, text=True)
        if k in text1:
            self.fenci.append(text1)
            self.results1.append(j)


    # def pyhanlpfenci(j, k):
    #     words = HanLP.segment(j)
    #     for term in words:
    #         if k in term.word:
    #             # fenci.append(words)
    #             results4.append(j)

    def outputResult(self, keyWord):
        """Write the matched sentences to <base><keyWord>/result.txt."""
        # os.path.join instead of a hard-coded "\\" — identical on Windows,
        # correct on other platforms.
        path = os.path.join(resultdirBasepath + keyWord, "result.txt")
        with open(path, 'w', encoding='utf-8') as newfile:
            for line in self.results1:
                newfile.write(line + '\n')
        print("--" + keyWord + "--搜索结果已完成写入")

    def creatDir(self, keyWord):
        """Create the per-keyword output directory if it does not exist.

        (Method name kept as-is for caller compatibility.)
        """
        # exist_ok avoids the check-then-create race of the original.
        os.makedirs(resultdirBasepath + keyWord, exist_ok=True)

    def outputFenci(self, keyWord):
        """Write the raw segmentations to <base><keyWord>/fenci.txt."""
        path = os.path.join(resultdirBasepath + keyWord, "fenci.txt")
        with open(path, 'w', encoding='utf-8') as fencifile:
            for line in self.fenci:
                fencifile.write(line + '\n')
        print("--" + keyWord + "--分词结果已完成写入")

    def outputFenciwithStopDic(self, keyWord):
        """Write the stopword-filtered segmentations to
        <base><keyWord>/fenciWithStopDIc.txt."""
        path = os.path.join(resultdirBasepath + keyWord, "fenciWithStopDIc.txt")
        with open(path, 'w', encoding='utf-8') as fenciwithstopDicfile:
            for line in self.fenciWithStopWordDic:
                fenciwithstopDicfile.write(line + '\n')
        print("--" + keyWord + "--停词分词结果已完成写入")

    def outputMidKeyWithStopDic(self, keyWord):
        """Write mid-keywords and their counts, most frequent first, to
        <base><keyWord>/MidKey.txt."""
        path = os.path.join(resultdirBasepath + keyWord, "MidKey.txt")
        with open(path, 'w', encoding='utf-8') as midkeyfile:
            # Sort by count, descending. (The original built a throwaway
            # dict and shadowed the builtin `list`.)
            for key, value in sorted(self.dic.items(),
                                     key=lambda item: item[1], reverse=True):
                midkeyfile.write(key + " " + str(value) + '\n')
        print("--" + keyWord + "--中介关键词结果已完成写入")


    def appendSearchRecords(self, keyWord):
        """Append ``keyWord`` to the persistent search-record file so it can
        be served from cache on the next query."""
        with open(searchRecordspath, 'a', encoding='utf-8') as newfile:
            newfile.write(keyWord + '\n')
        print("--" + keyWord + "--搜索已经追加进搜索记录，下次可直接查询")

