import os
import jieba
from collections import Counter
import math

# NOTE(review): removed `from Crypto.SelfTest.Cipher.test_CBC import file_name`.
# It was an accidental IDE auto-import from pycryptodome's internal test suite;
# `file_name` is never used in this file and the import breaks the script when
# pycryptodome is not installed.

# Load the custom user dictionary so jieba recognizes domain-specific terms.
# Raw string avoids the invalid escape sequences (\C, \P) of the original
# literal; the resulting path value is unchanged.
jieba.load_userdict(r"E:\Code\PythonProject\CompKey Algorithm Experiment\CompKey Algorithm Experiment\experimet2\try_words")

# Create a directory path if it does not already exist
def path_check(path):
    """Ensure that directory `path` (including parents) exists.

    Uses exist_ok=True, which avoids the check-then-create race of the
    original `os.path.exists` + `os.makedirs` pair and is idempotent.
    """
    os.makedirs(path, exist_ok=True)

# Decide whether two words are highly similar
def word_similarity(word1, word2):
    """Return True when either word is a substring of the other."""
    return word1 in word2 or word2 in word1

# Extract the sentences that contain the seed keyword
def extract_seed_sentences(seedword, input_file, output_file):
    """Copy every line of `input_file` containing `seedword` to `output_file`.

    Lines are written verbatim (including their trailing newlines).
    """
    with open(input_file, 'r', encoding='utf-8') as src:
        hits = (line for line in src if seedword in line)
        with open(output_file, 'w', encoding='utf-8') as dst:
            dst.writelines(hits)

# Tokenize the text with jieba
def jieba_tokenize(input_file, output_file):
    """Segment every line of `input_file` with jieba (accurate mode) and
    write the tokens to `output_file`, one token per line."""
    tokens = []
    with open(input_file, 'r', encoding='utf-8') as src:
        for raw_line in src:
            tokens.extend(jieba.cut(raw_line.strip(), cut_all=False))
    with open(output_file, 'w', encoding='utf-8') as dst:
        dst.writelines(token + '\n' for token in tokens)

# Remove stop words from a token file
def remove_stop_words(input_file, output_file, stop_words):
    """Copy tokens from `input_file` to `output_file`, dropping stop words.

    `input_file` holds one token per line; surviving tokens are written one
    per line. `stop_words` may be any iterable of strings; it is converted
    to a set once so membership tests are O(1) instead of scanning the whole
    list for every token.
    """
    stop_set = set(stop_words)
    with open(input_file, 'r', encoding='utf-8') as data:
        word_list = [word.strip() for word in data]
    word_cleaned = [word for word in word_list if word not in stop_set]
    with open(output_file, 'w', encoding='utf-8') as file:
        for word in word_cleaned:
            file.write(word + '\n')

# Count how many lines contain the seed word and the agency word
def get_s_sa(seedword, agencyword, file_path):
    """Scan `file_path` line by line and return a pair of counts:

    - number of lines containing `seedword`
    - number of lines containing `agencyword`
    """
    seed_hits = 0
    agency_hits = 0
    with open(file_path, encoding='utf-8') as origin_data:
        for line in origin_data:
            # bool is an int subtype, so substring tests add 0 or 1 directly
            seed_hits += seedword in line
            agency_hits += agencyword in line
    return seed_hits, agency_hits

# Compute agency-keyword statistics and weights for one seed word

def seed_agent(filename, seedword, filtered_file, search_info_file):
    """Rank candidate agency keywords for `seedword` and write them to `filename`.

    Reads the stop-word-filtered token file (`filtered_file`, one token per
    line), takes the 100 most frequent tokens, skips ones that are trivially
    similar to the seed word, and weights each remaining candidate by
    (lines containing candidate) / (lines containing seed) over
    `search_info_file`. Results are written in descending weight order.
    """
    with open(filtered_file, encoding='utf-8') as data:
        counts = Counter(word.strip() for word in data)

    entries = []
    for word, freq in counts.most_common(100):
        # Drop candidates that are substrings/superstrings of the seed itself
        if word_similarity(seedword, word):
            continue
        s_num, sa_num = get_s_sa(seedword, word, search_info_file)
        weight = round(sa_num / s_num, 8) if s_num > 0 else 0
        entries.append((weight, word, freq))

    # Sort numerically on the weight; the stable sort keeps Counter's
    # frequency order among equal weights, matching the original output.
    entries.sort(key=lambda item: item[0], reverse=True)

    with open(filename, 'w', encoding='utf-8') as file:
        for weight, word, freq in entries:
            file.write(f"中介关键字：{word}||出现次数：{freq}||中介关键词权重：{weight}" + '\n')

# Main pipeline: extract -> tokenize -> filter -> weight, per seed word
def main():
    """Run the agency-keyword pipeline for every seed word.

    Reads './re_filter.txt' (corpus) and 'stop_words.txt' from the working
    directory and writes all intermediate and final files under
    './seedwords_agencywords/'.
    """
    seedwords_list = ['诛仙']
    stop_words_file = "stop_words.txt"

    # Load stop words, one per line
    with open(stop_words_file, 'r', encoding='utf-8') as f:
        stop_words_list = f.read().splitlines()

    # Create the output directories
    path_check('./seedwords_agencywords/search_info')
    path_check('./seedwords_agencywords/jieba_search_info')
    path_check('./seedwords_agencywords/stop_words_filter')
    path_check('./seedwords_agencywords/agency_words')

    for seedword in seedwords_list:
        # Step 1: extract sentences that contain the seed keyword
        search_info_file = f'./seedwords_agencywords/search_info/seedword_{seedword}.txt'
        extract_seed_sentences(seedword, './re_filter.txt', search_info_file)

        # Step 2: tokenize the matching sentences with jieba
        jieba_file = f'./seedwords_agencywords/jieba_search_info/seedword_{seedword}.txt'
        jieba_tokenize(search_info_file, jieba_file)

        # Step 3: filter out stop words
        stop_words_filtered_file = f'./seedwords_agencywords/stop_words_filter/seedword_{seedword}.txt'
        remove_stop_words(jieba_file, stop_words_filtered_file, stop_words_list)

        # Step 4: compute agency-keyword weights and write the ranked result
        agency_words_file = f'./seedwords_agencywords/agency_words/seedword_{seedword}.txt'
        seed_agent(agency_words_file, seedword, stop_words_filtered_file, search_info_file)

# Script entry point: run the pipeline only when executed directly, not on import
if __name__ == "__main__":
    main()

# --- Legacy implementation below, kept commented out for reference; ---
# --- superseded by the active pipeline above.                        ---
# import os
# import jieba
# from collections import Counter
# import math
#
#
# # 检查路径是否存在，不存在则创建
# def path_check(path):
#     if not os.path.exists(path):
#         os.makedirs(path)
#
#
# # 统计文件中每个词的出现次数
# def count_words(file_path):
#     with open(file_path, 'r', encoding='utf-8') as f:
#         return Counter(word.strip() for word in f)
#
#
# # 判断两个词是否高度相似
# def word_similarity(word1, word2):
#     if word1 in word2 or word2 in word1:
#         return True
#     return False
#
#
# # 提取包含种子关键词的句子/中介关键词
# def extract_seed_sentences(seedword, input_file, output_file):
#     with open(input_file, 'r', encoding='utf-8') as origin_data, open(output_file, 'w',
#                                                                       encoding='utf-8') as result_data:
#         for sentence in origin_data:
#             if seedword in sentence:
#                 result_data.write(sentence)
#
#
# # 对文本进行分词
# def jieba_tokenize(seedword, input_file, output_file):
#     sep_list = []
#     with open(input_file, 'r', encoding='utf-8') as data:
#         for line in data:
#             seg_list = jieba.cut(line.strip(), cut_all=False)
#             sep_list.extend(seg_list)
#     with open(output_file, 'w', encoding='utf-8') as file:
#         for word in sep_list:
#             file.write(word + '\n')
#
#
# # 移除停用词
# def remove_stop_words(seedword, input_file, output_file, stop_words):
#     word_list = []
#     with open(input_file, 'r', encoding='utf-8') as data:
#         word_list = [word.strip() for word in data]
#     word_cleaned = [word for word in word_list if word not in stop_words]
#     with open(output_file, 'w', encoding='utf-8') as file:
#         for word in word_cleaned:
#             file.write(word + '\n')
#
#
# # 获取关键词统计结果
# def seed_agent(seedword, input_file, output_file, search_volumes):
#     word_list = []
#     with open(input_file, 'r', encoding='utf-8') as data:
#         word_list = [word.strip() for word in data]
#     count_result = Counter(word_list)
#     result = []
#     for key, val in count_result.most_common(100):
#         if word_similarity(seedword, key):
#             continue
#         result.append(f"中介关键字：{key}||出现次数：{val}||搜索量：{search_volumes.get(key, 0)}")
#
#     with open(output_file, 'w', encoding='utf-8') as file:
#         for line in result:
#             file.write(line + '\n')
#
#
# # 计算权重
# def calculate_weights(agency_keywords, search_volumes):
#     weights = {}
#     for keyword, search_volume in search_volumes.items():
#         # 这里定义一个简单的权重计算公式：权重 = 搜索量 * 关键词频次
#         frequency = agency_keywords.get(keyword, 0)
#         weights[keyword] = search_volume * frequency
#     return weights
#
#
# # 统计搜索量（此处简化为模拟数据，实际应用中需要从搜索引擎API获取）
# def get_search_volumes(seedwords, agency_keywords):
#     search_volumes = {}
#     for word in seedwords + list(agency_keywords.keys()):
#         # 模拟搜索量为词频的两倍
#         search_volumes[word] = agency_keywords.get(word, 0) * 2
#     return search_volumes
#
#
# # 主流程
# def main():
#     seedwords_list = ['图片', '手机', '意思', '小说', '视频', '下载', '大全', '电影', '中国', '世界', '重生', '百度',
#                       '官网', '英语', '电视剧']
#     stop_words_file = "stop_words.txt"
#
#     # 加载停用词
#     with open(stop_words_file, 'r', encoding='utf-8') as f:
#         stop_words_list = f.read().splitlines()
#
#     # 创建目录
#     path_check('./seedwords_agencywords/search_info')
#     path_check('./seedwords_agencywords/jieba_search_info')
#     path_check('./seedwords_agencywords/stop_words_filter')
#     path_check('./seedwords_agencywords/agency_words')
#
#     # 用于存储所有中介关键词及其出现次数
#     agency_keywords_all = {}
#
#     for seedword in seedwords_list:
#         # Step 1: 提取种子关键词相关句子/中介关键词
#         search_info_file = f'./seedwords_agencywords/search_info/seedword_{seedword}.txt'
#         extract_seed_sentences(seedword, './re_filter.txt', search_info_file)
#
#         # Step 2: 分词处理
#         jieba_file = f'./seedwords_agencywords/jieba_search_info/seedword_{seedword}.txt'
#         jieba_tokenize(seedword, search_info_file, jieba_file)
#
#         # Step 3: 停用词过滤
#         stop_words_filtered_file = f'./seedwords_agencywords/stop_words_filter/seedword_{seedword}.txt'
#         remove_stop_words(seedword, jieba_file, stop_words_filtered_file, stop_words_list)
#
#         # Step 4: 关键词统计
#         agency_words_file = f'./seedwords_agencywords/agency_words/seedword_{seedword}.txt'
#         seed_agent(seedword, stop_words_filtered_file, agency_words_file, agency_keywords_all)
#
#         # 将中介关键词加入总词汇表
#         with open(agency_words_file, 'r', encoding='utf-8') as f:
#             for line in f:
#                 keyword = line.split("||")[0].split("：")[1].strip()
#                 agency_keywords_all[keyword] = agency_keywords_all.get(keyword, 0) + 1
#
#     # 获取搜索量
#     search_volumes = get_search_volumes(seedwords_list, agency_keywords_all)
#
#     # 计算权重
#     weights = calculate_weights(agency_keywords_all, search_volumes)
#
#     # 输出权重结果
#     with open('./seedwords_agencywords/agency_weights.txt', 'w', encoding='utf-8') as f:
#         for keyword, weight in weights.items():
#             f.write(f"中介关键字：{keyword}||权重：{weight}\n")
#
#
# if __name__ == "__main__":
#     main()
#
