#!/usr/bin/python3

import threading
import os
from dataPath import resultdirBasepath,searchRecordspath
from dboperation import save_compwordkeys,insert_record
from stopWordDic import inStopwordDic

class compwordkeysThread (threading.Thread):
    """Worker thread that mines "competitor keywords" for one seed keyword.

    For every mediator keyword ``k`` (read from the seed's MidKey.txt) the
    thread scans the segmented search log ``fencilog``, counts the lines in
    which ``k`` occurs, and credits every word co-occurring with ``k`` with a
    competition weight of::

        (v / num_seedwordkey) / (lines_containing_k - v)

    where ``v`` is the mediator's stored seed co-occurrence count.  The
    accumulated weights are written to compKey.txt, the seed keyword is
    appended to the search-record file, and the top-10 competitors are
    saved to the database.
    """

    def __init__(self, threadID, name,  SeedKeyword, fencilog, midkeynum, num_seedwordkey, optimization):
        """Load up to ``midkeynum`` mediator keywords for ``SeedKeyword``.

        ``fencilog`` is assumed to be a re-iterable sequence of UTF-8 encoded
        byte lines of the word-segmentation log (the original iterates it
        several times and decodes each line) -- TODO confirm against callers.
        """
        threading.Thread.__init__(self)
        self.threadID = threadID
        self.name = name
        self.SeedKeyword = SeedKeyword
        self.fencilog = fencilog
        # competitor word -> accumulated competition weight
        self.compwordkeysdic = {}
        # mediator keyword -> co-occurrence count with the seed keyword
        self.midwordkeysdic = {}
        # NOTE(review): Windows path separator kept to match the project;
        # `with` added so the handle is closed even if parsing raises.
        with open(resultdirBasepath + self.SeedKeyword + "\\MidKey.txt",
                  'r', encoding='utf-8') as midkeysfile:
            # Each line is "<mediator> <count>"; stop early at EOF/blank line.
            for _ in range(int(midkeynum)):
                tempkeyvalue = midkeysfile.readline().split()
                if not tempkeyvalue:
                    break
                self.midwordkeysdic[str(tempkeyvalue[0])] = int(tempkeyvalue[1])
        self.optimization = optimization
        # Seed keyword search volume; normalizes each mediator's weight v.
        self.num_seedwordkey = num_seedwordkey

    def run(self):
        """Collect competitor keywords, then write files and save to the DB."""
        print("开始线程：" + self.name + "并求取---" + self.SeedKeyword + "---竞争关键字")
        if not self.optimization:
            self._collect_plain()
        else:
            self._collect_with_pos()
        print("已经找到所有与种子关键字---" + self.SeedKeyword + "---有关的竞争关键字，下面开始把结果写入文件并退出线程：" + self.name)
        # full result file + search-record bookkeeping
        self.outputComp(self.SeedKeyword)
        self.appendSearchRecords(self.SeedKeyword)
        # persist the 10 highest-weighted competitors to the database
        ranked = sorted(self.compwordkeysdic.items(),
                        key=lambda item: item[1], reverse=True)
        compwordkeylist = [{key: value} for key, value in ranked[:10]]
        save_compwordkeys(self.SeedKeyword, compwordkeylist)
        print(compwordkeylist)
        insert_record(self.SeedKeyword)
        print(self.SeedKeyword)

    def _collect_plain(self):
        """Unoptimized pass: every log line is a plain word line."""
        # decode each line once instead of once per mediator (same behavior)
        lines = [raw.decode('utf-8') for raw in self.fencilog]
        for k, v in self.midwordkeysdic.items():
            # number of log lines that mention this mediator
            one_midwordkey_num = sum(1 for line in lines if k in line)
            for line in lines:
                if k not in line:
                    continue
                for word in line.split():
                    # drop function words, conjunctions, etc.
                    if inStopwordDic(word):
                        continue
                    self._record_competitor(word, k, v, one_midwordkey_num)

    def _collect_with_pos(self):
        """Optimized pass over a log with alternating POS-tag / word lines.

        Lines at even 0-based indexes carry part-of-speech tags; the
        following odd-indexed lines carry the corresponding words (this
        matches the index parity of the original code).
        """
        for k, v in self.midwordkeysdic.items():
            # count word lines (odd indexes) mentioning this mediator
            one_midwordkey_num = 0
            for index, raw in enumerate(self.fencilog):
                if index % 2 == 1 and k in raw.decode('utf-8'):
                    one_midwordkey_num += 1
            partofspeechlist = []
            for index, raw in enumerate(self.fencilog):
                sentence = raw.decode('utf-8')
                if index % 2 == 0:
                    # remember the tags for the word line that follows
                    partofspeechlist = sentence.split()
                elif k in sentence:
                    for pos, word in enumerate(sentence.split()):
                        if inStopwordDic(word):
                            continue
                        # filter by POS: skip verbs ('v') and abbreviations ('j')
                        # NOTE(review): assumes one tag per word; a shorter tag
                        # line raises IndexError, exactly as the original did.
                        if partofspeechlist[pos] in ('v', 'j'):
                            continue
                        self._record_competitor(word, k, v, one_midwordkey_num)

    def _record_competitor(self, word, midkey, v, mid_count):
        """Classify ``word`` against mediator ``midkey`` and add its weight."""
        # a competitor may equal neither the seed nor the mediator itself
        if word == self.SeedKeyword or word == midkey:
            return
        if midkey not in word:
            # word is a competitor keyword in its own right
            self._add_weight(word, v, mid_count)
        else:
            # word embeds the mediator (e.g. "湖北省" contains "省"): strip
            # the mediator and keep the remainder if it is still a real word
            newCompKey = self._strip_midkey(word, midkey)
            # must stay at least two characters long
            if len(newCompKey) <= 1:
                return
            # without this guard the seed keyword would sneak back in
            if newCompKey != self.SeedKeyword:
                self._add_weight(newCompKey, v, mid_count)

    def _add_weight(self, word, v, mid_count):
        """Accumulate one mediator's competition weight for ``word``.

        Weight = (v / num_seedwordkey) / (mid_count - v).
        Bugfix: the unoptimized path used to divide unguarded and raised
        ZeroDivisionError when the mediator only ever appeared together with
        the seed (mid_count == v); now both paths skip that case, matching
        the guard the optimized path already had.
        """
        denominator = mid_count - v
        if denominator == 0:
            return
        weight = (v / self.num_seedwordkey) / denominator
        self.compwordkeysdic[word] = self.compwordkeysdic.get(word, 0) + weight

    @staticmethod
    def _strip_midkey(word, midkey):
        """Remove ``midkey`` from the start and the end of ``word``.

        Bugfix: the original used ``word.strip(k)``, which treats ``k`` as a
        *character set* and can eat unrelated leading/trailing characters
        whenever the mediator keyword is longer than one character.
        """
        if word.startswith(midkey):
            word = word[len(midkey):]
        if word.endswith(midkey):
            word = word[:len(word) - len(midkey)]
        return word

    def outputComp(self, keyWord):
        """Write every competitor keyword, highest weight first, to compKey.txt."""
        ranked = sorted(self.compwordkeysdic.items(),
                        key=lambda item: item[1], reverse=True)
        with open(resultdirBasepath + keyWord + "\\compKey.txt",
                  'w', encoding='utf-8') as newfile:
            for key, value in ranked:
                newfile.write(key + " " + str(value) + '\n')
        print("--" + keyWord + "--竞争关键字关键词结果已完成写入")

    def appendSearchRecords(self, keyWord):
        """Append ``keyWord`` to the global search-record file for later reuse."""
        with open(searchRecordspath, 'a', encoding='utf-8') as newfile:
            newfile.write(keyWord + '\n')
        print("--" + keyWord + "--搜索已经追加进搜索记录，下次可直接查询")