import os
import time
import json

import jieba
import zhon.hanzi
from jieba import posseg
from nltk.corpus import stopwords
from dataProcess import Read_data

# Chinese punctuation characters (from zhon); kept for filtering use.
punc = zhon.hanzi.punctuation
# Stop words (Chinese, from the NLTK stopwords corpus)

stopwords_cn = stopwords.words("chinese")

# Load the project configuration file
CONFIG = Read_data.read_config()

# Root data directory and the two cache directories derived from it:
# - midword cache: per-seed-word "word count" frequency files
# - search_relation cache: per-seed-word raw matching search lines
data_location = CONFIG["location"]["data_root"]
data_midword_relate = data_location + CONFIG["location"]["midword"]
data_search_relation = data_location + CONFIG["location"]["search_relation"]

# Pre-processed search log lines scanned by the statistics routine.
# NOTE(review): assumed to be an iterable of newline-terminated strings —
# confirm against dataProcess.Read_data.process_data().
data_after_process = Read_data.process_data()

# Number of top-frequency mid-words persisted to the midword cache file.
num_midword = 10


def read_data_from_file(file_to_search):
    """Load a previously cached result set for a seed word from disk.

    Reads two files that share the name *file_to_search* (e.g. ``<word>.txt``):
    one under the search-relation directory (raw matching search lines) and
    one under the midword directory ("word count" pairs, space separated).

    Parameters
    ----------
    file_to_search : str
        Bare file name (not a path) present in both cache directories.

    Returns
    -------
    tuple[list[str], list[tuple[str, str]]]
        Cleaned search-relation lines, and (word, count) pairs.  Counts are
        returned as strings, exactly as stored on disk.
    """
    file_midword = data_midword_relate + '/' + file_to_search
    file_search_relation = data_search_relation + '/' + file_to_search

    # Context managers guarantee the handles are closed even if a read fails
    # (the original left both files open on any exception).
    with open(file_search_relation, 'r', encoding="UTF-8") as search_relation_file:
        search_relation_lines = search_relation_file.readlines()
    with open(file_midword, 'r', encoding="UTF-8") as midword_file:
        midword_lines = midword_file.readlines()

    search_relation = [
        line.replace('\n', '').replace('\t', '')
        for line in search_relation_lines
    ]

    midword = []
    for line in midword_lines:
        line = line.replace('\n', '')
        # rsplit keeps only the final field as the count, so a word that
        # happens to contain a space no longer crashes the unpacking.
        word, num = line.rsplit(' ', 1)
        midword.append((word, num))

    return search_relation, midword


# If the keyword has no local cache yet, scan the processed search log,
# record the matching lines, segment them, and persist word-frequency stats.
def search_seedword_relation_and_statistics(seed_word):
    """Scan the processed search log for *seed_word* and build its statistics.

    Side effects: appends every matching search line to
    ``<data_search_relation>/<seed_word>.txt`` and appends the top
    ``num_midword`` "word count" pairs to
    ``<data_midword_relate>/<seed_word>.txt``.

    Returns
    -------
    tuple[list[str], list[tuple[str, int]]]
        ``search_relate`` (one entry per matching log line) and the 20 most
        frequent co-occurring words with their counts.
    """
    f_location_relate_search_relation = data_search_relation + '/' + seed_word + '.txt'
    search_relate = []
    # The original also opened the midword file here without ever using or
    # closing it (a leak); the midword file is opened below when written.
    with open(f_location_relate_search_relation, 'a', encoding='utf-8') as relate_file:
        for search_line in data_after_process:
            # Substring match per log line.  (The original called
            # str.replace on the line and discarded the result — a no-op —
            # so the line is written with its newline intact, as before.)
            if seed_word in search_line:
                relate_file.write(search_line)
                # NOTE(review): appends the seed word, not the matching line;
                # downstream code only uses len(search_relate) — confirm intent.
                search_relate.append(seed_word)

    with open(f_location_relate_search_relation, encoding="utf-8") as word_input_file:
        text = word_input_file.read()

    # Part-of-speech segmentation.  Keep a word unless it is a verb ('v') or
    # the particle 'uj', overlaps the seed word, is a single character, or
    # starts with a digit.
    word_apart = []
    for word, flag in posseg.cut(text):
        if flag == 'v' or flag == 'uj':
            continue
        if word in seed_word or seed_word in word:
            continue
        if len(word) <= 1 or '0' <= word[0] <= '9':
            continue
        word_apart.append(word)

    # Word-frequency statistics.
    counts = {}
    for word in word_apart:
        if len(word) > 1 and word != '\n' and word != '\t':
            counts[word] = counts.get(word, 0) + 1

    # Remove stop words (pop with default so missing words are ignored).
    for word in stopwords_cn:
        counts.pop(word, 0)

    ls_sorted = sorted(counts.items(), key=lambda x: x[1], reverse=True)

    file_outcome = data_midword_relate + '/' + seed_word + '.txt'
    # Slicing (instead of indexing range(num_midword)) avoids the IndexError
    # the original raised when fewer than num_midword distinct words exist.
    with open(file_outcome, 'a', encoding="utf-8") as statistic_result:
        statistic_result.write(
            '\n'.join('%s %s' % item for item in ls_sorted[:num_midword]))

    return search_relate, ls_sorted[0:20]


def search_seedword_relation(seedword):
    """Return ``(search_relate, midword)`` for *seedword*.

    Reads from the local cache when a file for the word already exists in
    the midword directory; otherwise runs the full search-and-statistics
    pass, which also writes the cache files.
    """
    cached_name = seedword + '.txt'
    # Cache hit: the per-word statistics file already exists on disk.
    if cached_name in os.listdir(data_midword_relate):
        return read_data_from_file(cached_name)
    # Cache miss: compute the result and persist it locally.
    return search_seedword_relation_and_statistics(seedword)


def statistics_midword_weight(research_relation, midwords):
    """Compute each mid-word's influence weight.

    A mid-word's weight is its stored count divided by the number of search
    records related to the seed word.  Returns a dict keyed by mid-word.
    """
    total_records = len(research_relation)
    return {pair[0]: int(pair[1]) / total_records for pair in midwords}


def create_word(seed_word):
    """Breadth-first expansion of mid-words starting from *seed_word*.

    Fetches the seed word's mid-words, then expands each discovered word in
    turn, collecting newly seen mid-words, until 1000 words have been
    expanded or the frontier is exhausted.

    Returns
    -------
    list[str]
        The accumulated mid-words in discovery order.  (The original built
        this list and silently discarded it; returning it is backward
        compatible since no caller used the previous None result.)
    """
    # Reuse the cache-aware lookup instead of duplicating its directory
    # check here and below; this also stops the loop from re-running the
    # expensive scan (and re-appending to the cache files) for words that
    # are already cached.
    _relate, midwords = search_seedword_relation(seed_word)

    midwords_word = [pair[0] for pair in midwords]
    seen = set(midwords_word)  # O(1) membership; list keeps discovery order

    cnt = 0
    # Also bound by the current list length: the original indexed blindly
    # and raised IndexError when the frontier ran out before 1000 steps.
    while cnt < 1000 and cnt < len(midwords_word):
        word = midwords_word[cnt]
        _relate, midwords_mid = search_seedword_relation(word)
        for pair in midwords_mid:
            candidate = pair[0]
            if candidate not in seen:
                seen.add(candidate)
                midwords_word.append(candidate)
        cnt += 1

    return midwords_word


def compKey(seed_word):
    """Find competing keywords for *seed_word*.

    Pipeline: fetch the seed word's related searches and mid-words, weight
    each mid-word by its frequency among the seed's related records, then
    score every keyword co-occurring with a mid-word (excluding keywords
    overlapping the seed word) by ``weight * count / (records - seed_count)``.

    Returns a 4-tuple:
      * top-10 ``(keyword, score)`` pairs, highest score first,
      * dict: mid-word -> competitor keywords found through it,
      * dict: mid-word -> influence weight,
      * raw co-occurrence counts for the top-10 keywords (same order).
    """
    time_start = time.time()  # NOTE(review): start time is recorded but never used
    comp_word = {}
    '''
    ————————开始查找出现的相关搜索记录并分词————————
    '''
    # Stage 1: related search records for the seed word, plus its mid-words.
    seed_word_search_relate, midwords = search_seedword_relation(seed_word)

    '''
    ————————开始查找中介关键字的影响权重————————
    '''
    # Stage 2: influence weight of each mid-word, stored as a dict.
    weight_dict = statistics_midword_weight(seed_word_search_relate, midwords)
    # print(weight_dict["纸尿裤"])
    '''
    ————————开始查找竞争性关键字————————
    '''
    # Stage 3: score competing keywords reached through each mid-word.
    midwords_info_list = {}
    static_word = {}  # raw co-occurrence count accumulator per keyword
    for midword in midwords:
        list_comp_word = []
        midword_search_relate, compwords = search_seedword_relation(midword[0])
        # a: records related to the mid-word; sa: the mid-word's count next
        # to the seed.  NOTE(review): a == sa would raise ZeroDivisionError
        # below — confirm whether that can occur with real data.
        a = len(midword_search_relate)
        sa = int(midword[1])
        for compkey in compwords:
            # Skip candidates that overlap the seed word itself.
            if (seed_word in compkey[0]) or (compkey[0] in seed_word):
                continue
            else:
                # Collect the candidate for this mid-word's result list.
                list_comp_word.append(compkey[0])

                if not (compkey[0] in comp_word.keys()):
                    comp_word[compkey[0]] = 0
                    static_word[compkey[0]] = 0
                ka = float(compkey[1])
                # Relative frequency among records not explained by the seed.
                comp_value_k = ka / (a - sa)
                comp_value = weight_dict[midword[0]] * comp_value_k
                comp_word[compkey[0]] += comp_value
                static_word[compkey[0]] += ka
        midwords_info_list[midword[0]] = list_comp_word
    # print(midwords_info_list)
    # Rebind comp_word from dict to a score-sorted list of (keyword, score).
    comp_word = sorted(comp_word.items(), key=lambda d: d[1], reverse=True)

    # static_outcome = {}
    # Raw counts for the top-10 keywords, printed for inspection.
    static_outcome = []
    for word in comp_word[0:10]:
        print(word[0])
        print(static_word[word[0]])
        static_outcome.append(static_word[word[0]])
        # static_outcome[word[0]] = static_word[word[0]]

    print(static_outcome)
    return comp_word[0:10], midwords_info_list, weight_dict, static_outcome


if __name__ == "__main__":
    # Demo run: compute competitors for a sample seed word and print the
    # raw co-occurrence counts of the top results.
    results = compKey('菊花')
    print(results[3])
