import os
import shutil
import threading
import time
import json
from datetime import datetime
from functools import partial
from multiprocessing import Pool
from operator import attrgetter

from comp_key import *
from dataProcess import Read_data
from dataProcess import data_statistic_jieba
from multiprocessing.dummy import Pool as ThreadPool

# -*- coding:utf-8 -*-  # NOTE(review): a coding declaration only takes effect on line 1 or 2 of the file; here it is inert (UTF-8 is the Python 3 default anyway)

import logging

# logging defaults to WARNING; basicConfig() below raises the level to DEBUG
# and routes everything to the configured log file.

# number of mediator ("mid") keywords to collect per seed keyword
num_mid_word = 20

# All file locations come from a single JSON config next to the project root.
with open("../file_location.json", 'r', encoding="utf-8") as load_file:
    CONFIG = json.load(load_file)
# root directory under which every data path below is built
data_location = CONFIG["location"]["data_root"]
# NOTE(review): loads the whole preprocessed search log into memory at import
# time — each worker process created in __main__ re-runs this on spawn/fork.
data_after_process = Read_data.process_data()
data_data_relate = data_location + CONFIG["location"]["data_relate"]
data_after_statistic = data_location + CONFIG["location"]["data_statistic"]
data_mid_word = data_location + CONFIG["location"]["data_midWord"]
data_apart_root = data_location + CONFIG["location"]["data_apart"]
# NOTE(review): built from the same config key as data_after_statistic —
# the two names alias one directory.
data_statistic_root = data_location + CONFIG["location"]["data_statistic"]
data_temp = data_location + CONFIG["location"]["data_temp"]
data_seed_and_mid_word = data_location + CONFIG["location"]["data_seed_and_mid_word"]
data_comp_word_temp = data_location + CONFIG["location"]["data_comp_word_temp"]
data_mid_word_relate = data_location + CONFIG["location"]["data_mid_word_relate"]
data_comp_word_result = data_location + CONFIG["location"]["data_comp_word_result"]
data_midword_weight_result = data_seed_and_mid_word + CONFIG["location"]["data_midword_weight_result"]
data_seed_word = data_location + CONFIG["location"]["data_seed_word"]
data_log = CONFIG["location"]["data_logging"]

# file_time_cost = CONFIG["location"]["outcome_time_cost"]

logging.basicConfig(level=logging.DEBUG  # log everything from DEBUG upwards
                    , filename=data_log  # log file path taken from the config
                    , filemode="w"  # "w": truncate the log on each run (default would append)
                    , format="%(asctime)s - %(name)s - %(levelname)-9s - %(filename)-8s : %(lineno)s line - %(message)s"
                    # "-9"/"-8" left-align the fields padded to a fixed width
                    , datefmt="%Y-%m-%d %H:%M:%S"  # timestamp format
                    )

# per-stage timing records; defined here but never read in the visible code
time_cost_all = {
    "cost_each": []
}

# number of competitive keywords to report per seed keyword
n = 10


class myThread(threading.Thread):
    """Minimal worker thread that only announces its start and exit.

    The actual workload (a ``print_time`` call) is disabled in this
    project, so ``run()`` just prints two messages.
    """

    def __init__(self, threadID, name, delay):
        super().__init__()
        self.threadID = threadID  # numeric thread identifier
        self.name = name          # label used in the start/exit messages
        self.delay = delay        # kept for the (disabled) print_time call

    def run(self):
        print(f"开始线程：{self.name}")
        # print_time(self.name, self.delay, 5)
        print(f"退出线程：{self.name}")


def compKey(seed_word, n):
    """Compute and persist the top-``n`` competitive keywords for *seed_word*.

    Pipeline (every intermediate result is appended to files under the
    configured data directories):

    1. Collect every preprocessed search record containing the seed
       keyword (count ``s``).
    2. Segment those records (jieba, via ``data_statistic_jieba``) and pick
       up to ``num_mid_word`` mediator ("mid") keywords from the frequency
       statistics.
    3. For each mid keyword, count records containing BOTH seed and mid
       keyword (``sa``) and derive its influence weight ``sa / s``.
    4. For each mid keyword, collect all records containing it (``a``) and
       extract candidate competitive keywords from their frequency
       statistics.
    5. Score each competitive keyword as the sum over its mid keywords of
       ``weight * ka / (a - sa)`` — ``ka`` counting records that contain
       both the mid and the competitive keyword — and write the ``n`` best
       to the result file.

    Parameters:
        seed_word: seed keyword to analyse.
        n: number of competitive keywords to keep (per mid keyword while
           collecting candidates, and in the final ranking).

    Returns None; all output goes to files and the log.
    """
    s = 0             # number of search records containing the seed keyword
    weight_dict = {}  # mid keyword -> influence weight (sa / s)
    sa_dict = {}      # mid keyword -> count of records with seed AND mid keyword
    a_dict = {}       # mid keyword -> count of records with the mid keyword

    print("————————开始查找出现的相关搜索记录并分词————————")
    data_relate = data_location + CONFIG["location"]["data_relate"] + seed_word + ".txt"
    time_find_relate_start = time.time()
    is_contain = False
    # Collect every search record mentioning the seed keyword.  The original
    # called str.replace("\t"/"\n") and discarded the result (a no-op);
    # substring matching works on the raw line, so the lines are written
    # through unchanged, keeping their trailing newlines.
    with open(data_relate, 'a', encoding='utf-8') as f_relate:
        for line in data_after_process:
            if seed_word in line:
                is_contain = True
                f_relate.write(line)
                s += 1
    if not is_contain:
        logging.info("不存在关键字，请更换一个")
        print("搜索的数据中不包含该种子关键字", seed_word, "请重新输入")
        # BUG FIX: bail out — the original fell through with s == 0 and
        # raised ZeroDivisionError when computing sa / s.
        return
    time_find_relate_cost = time.time() - time_find_relate_start
    print("查询相关的搜索记录耗时：", time_find_relate_cost)

    print("————————开始寻找中介关键字————————")
    word_relate = data_data_relate + seed_word + '.txt'   # records with the seed keyword
    word_apart = data_apart_root + seed_word + '.txt'     # segmented output
    word_static = data_statistic_root + seed_word + '.txt'  # word-frequency output
    # Segment the related records and build the word-frequency file.
    data_statistic_jieba.statistic(word_relate, word_apart, word_static)

    with open(word_static, 'r', encoding="utf-8") as statistics_data:
        statistics_data_lines = statistics_data.readlines()

    # Pick the first num_mid_word mid-keyword candidates: longer than one
    # character, not containing the seed word, not starting with a digit.
    mid_word_contain_list = []
    with open(data_mid_word + seed_word + ".txt", 'a', encoding="utf-8") as mid_word_out:
        for line in statistics_data_lines:
            # each statistics line is "word frequency"; keep the word only
            contain = line.split(" ")[0]
            # BUG FIX: length check first — the original indexed contain[0]
            # before checking, crashing on an empty token.
            if len(contain) <= 1 or (seed_word in contain) or ('0' <= contain[0] <= '9'):
                continue
            mid_word_out.write(contain)
            mid_word_out.write('\n')
            mid_word_contain_list.append(contain)
            if len(mid_word_contain_list) == num_mid_word:
                break
    print("mid_word_contain_list", mid_word_contain_list)
    print("————————中介关键字查找完成————————")

    print("————————开始查询包含中介关键字与种子关键字的查询搜索量————————")
    seed_dir = os.path.join(data_seed_and_mid_word, seed_word)
    os.makedirs(seed_dir, exist_ok=True)

    # Hoisted: read the seed-related records once instead of per mid keyword.
    with open(word_relate, 'r', encoding="UTF-8") as f:
        word_relate_list = f.readlines()

    # For each mid keyword: count co-occurrences with the seed keyword and
    # record its influence weight.
    with open(data_midword_weight_result + seed_word + '.txt', 'a', encoding="UTF-8") as weight_out:
        for mid_word_contain in mid_word_contain_list:
            sa = 0
            seed_mid_file = os.path.join(seed_dir, mid_word_contain + '.txt')
            with open(seed_mid_file, 'a', encoding="UTF-8") as seed_mid_out:
                for record in word_relate_list:
                    if mid_word_contain in record:
                        seed_mid_out.write(record)
                        sa += 1
            weight = sa / s
            weight_dict[mid_word_contain] = weight
            sa_dict[mid_word_contain] = sa
            # BUG FIX: the original wrote the MID keyword after the
            # "种子关键字：" label; write the seed keyword instead.
            weight_out.write("种子关键字：")
            weight_out.write(seed_word)
            weight_out.write("中介关键字：")
            weight_out.write(mid_word_contain)
            weight_out.write("  sa：")
            weight_out.write(str(sa))
            weight_out.write("  s：")
            weight_out.write(str(s))
            weight_out.write("  影响权重：")
            weight_out.write(str(weight))
            weight_out.write("\n")

    # Collect all records containing each mid keyword (used later for ka).
    mid_relate_dir = os.path.join(data_mid_word_relate, seed_word)
    os.makedirs(mid_relate_dir, exist_ok=True)
    for mid_word_contain in mid_word_contain_list:
        # BUG FIX: reset per mid keyword — the original initialised a = 0
        # once outside the loop, so counts accumulated across mid keywords,
        # and then re-counted the same records a second time further down,
        # leaving a_dict with inflated values.
        a = 0
        mid_relate_file = os.path.join(mid_relate_dir, mid_word_contain + '.txt')
        with open(mid_relate_file, 'a', encoding="UTF-8") as mid_relate_out:
            for line in data_after_process:
                if mid_word_contain in line:
                    a += 1
                    mid_relate_out.write(line)
        a_dict[mid_word_contain] = a

    comp_key_dict = {}  # competitive keyword -> comp_key entry

    print("————————开始查找竞争性关键字————————")
    with open(data_comp_word_temp + seed_word + '.txt', 'a', encoding="UTF-8") as comp_word_file:
        for line_mid_word in mid_word_contain_list:
            # Dump every record containing the mid keyword so it can be
            # segmented; the count already lives in a_dict (no re-counting).
            relate_search_temp_filename = data_temp + line_mid_word + '.txt'
            with open(relate_search_temp_filename, 'a', encoding="utf-8") as relate_search_temp:
                for line_search in data_after_process:
                    if line_mid_word in line_search:
                        relate_search_temp.write(line_search)
                        relate_search_temp.write('\n')

            word_apart_file = data_temp + line_mid_word + 'TempApart.txt'
            word_statistics = data_temp + line_mid_word + 'statistics.txt'
            data_statistic_jieba.statistic(relate_search_temp_filename, word_apart_file, word_statistics)

            with open(word_statistics, 'r', encoding="utf-8") as f:
                word_statistics_outcome_lines = f.readlines()

            # Up to n competitive-keyword candidates for this mid keyword.
            compare_keywords = []
            for line in word_statistics_outcome_lines:
                line_contain = line.split(" ")[0]
                # filter single characters and digit-initial tokens
                # (length check first, see mid-keyword loop above)
                if len(line_contain) <= 1 or ('0' <= line_contain[0] <= '9'):
                    continue
                # A competitive keyword must neither contain nor be
                # contained in the seed keyword or the mid keyword.
                if (seed_word in line_contain) or (line_contain in seed_word) \
                        or (line_contain in line_mid_word) or (line_mid_word in line_contain):
                    continue
                compare_keywords.append(line_contain)

                entry = comp_key_dict.setdefault(line_contain, comp_key(seed_word, line_contain))
                entry.mid_word[line_mid_word] = weight_dict[line_mid_word]

                comp_word_file.write("mid_word ")
                comp_word_file.write(line_mid_word)
                comp_word_file.write(" comp_word ")
                comp_word_file.write(line_contain)
                comp_word_file.write("\n")

                if len(compare_keywords) == n:
                    break

            print("中介关键字：", line_mid_word, "所有竞争性关键字：", compare_keywords)

    # Score every competitive keyword over its mid keywords.
    for comp_word, entry in comp_key_dict.items():
        for mid_word_name, weight in entry.mid_word.items():
            # ka: records containing both the mid and the competitive keyword
            ka = 0
            mid_relate_file = os.path.join(mid_relate_dir, mid_word_name + '.txt')
            with open(mid_relate_file, 'r', encoding="UTF-8") as f:
                for relate_line in f:
                    if comp_word in relate_line:
                        ka += 1
            a = a_dict[mid_word_name]
            sa = sa_dict[mid_word_name]
            # a == sa would mean every record with the mid keyword also has
            # the seed keyword; guard the division (original crashed here).
            denom = a - sa
            comp_value = ka / denom if denom else 0.0
            entry.comp_mid[mid_word_name] = comp_value
            entry.comp += weight * comp_value

    # Rank by competitiveness, highest first.
    comp_result = []
    for comp_word, entry in comp_key_dict.items():
        comp_key_temp = comp_key(seed_word, comp_word)
        comp_key_temp.comp = entry.comp
        comp_result.append(comp_key_temp)
    comp_result.sort(key=attrgetter("comp"), reverse=True)

    # BUG FIX: the original reopened the result file on every iteration and
    # only closed the last handle (and crashed if the result list was
    # empty); open it once and slice the top n.
    with open(data_comp_word_result + seed_word + '.txt', 'a', encoding="UTF-8") as comp_word_outcome:
        for item in comp_result[:n]:
            comp_word_outcome.write("seed_word:")
            comp_word_outcome.write(seed_word)
            comp_word_outcome.write(" comp_word:")
            comp_word_outcome.write(item.comp_k)
            comp_word_outcome.write(" comp_value:")
            comp_word_outcome.write(str(item.comp))
            comp_word_outcome.write('\n')


if __name__ == "__main__":
    # Wipe and recreate every output directory so each run starts clean.
    # (The original repeated nine rmtree/mkdir pairs and crashed if a
    # directory was missing on a first run.)
    output_dirs = [
        data_data_relate,
        data_seed_and_mid_word,
        data_comp_word_result,
        data_comp_word_temp,
        data_mid_word_relate,
        data_after_statistic,
        data_mid_word,
        data_apart_root,
        data_temp,
    ]
    for directory in output_dirs:
        shutil.rmtree(directory, ignore_errors=True)
        os.makedirs(directory, exist_ok=True)

    start = datetime.now()

    # BUG FIX: close the handle, and use split() instead of split(' ') so
    # the trailing newline from readline() is not glued onto the last
    # keyword (and runs of spaces don't yield empty keywords).
    with open(data_seed_word, 'r', encoding="UTF-8") as f:
        keywords = f.readline().split()
    # e.g. ['宇宙', '三体', '地球', '故乡', '电路板', '秦时明月', ...]
    print(keywords)

    # Measured timings from the original author:
    #   single-threaded           0:17:01
    #   ThreadPool (1..23)        ~0:16:44-0:17:44 (GIL-bound, no gain)
    #   Pool(2/3/4/5) processes   0:10:16 / 0:08:30 / 0:06:18 / 0:05:59
    # 5 processes saturated CPU/memory, so 4 is the chosen sweet spot.
    pool = Pool(4)
    # Apply compKey to every keyword across the worker processes.
    pool.map(partial(compKey, n=10), keywords)
    # Stop accepting new tasks, then block until all workers finish
    # (join must follow close/terminate).
    pool.close()
    pool.join()

    duration = datetime.now() - start
    print("总耗时：", duration)
