from spider_brief import crawl, dqs
from analysis import create_wordcloud, get_requirement, get_stopwords_set, segment_requirement
import jieba
import jieba.analyse
import os
import matplotlib.pyplot as plt


if __name__ == "__main__":
    # Keys already crawled have a "detail_<key>.txt" file in ./output.
    result_filename_list = [filename for filename in os.listdir("./output") if filename.startswith('detail')]
    # Strip the "detail_" prefix (7 chars) and ".txt" suffix (4 chars).
    exist_keys = [filename[7:-4] for filename in result_filename_list]

    keys = input("输入职位(比如 人工智能 数据挖掘 java后端 互联网产品经理 图像算法工程师等)\n> ").split()
    search_dq = input("输入地区(比如 北京 上海 广州 深圳 苏州 杭州 武汉等)\n> ").split()

    # show_keys keeps EVERY requested key (already-crawled + new) for the
    # analysis/plotting stage below; `keys` is pruned to the ones to crawl.
    show_keys = keys.copy()

    # Skip keys whose detail files already exist (no re-crawl).
    for key in show_keys:
        if key in exist_keys:
            print(f"{key} 已存在 将被跳过")
            keys.remove(key)
    # Skip districts that are not in the supported search range.
    for dq in search_dq.copy():
        if dq not in dqs.values():
            print(f"{dq} 不在检索范围 将被跳过")
            search_dq.remove(dq)
    if keys:
        if not search_dq:
            # NOTE(review): passing dq=None presumably means "all districts"
            # inside crawl() — confirm against spider_brief.
            search_dq = None
        print("爬取相应职位信息...")
        crawl(keys=keys, dq=search_dq)

    # Snapshot the pre-crawl key set BEFORE extending: it decides which
    # wordcloud images are new and must be written to disk.  (The original
    # code compared against the already-extended list, so the condition was
    # always False and no wordcloud image was ever saved.)
    pre_existing = set(exist_keys)
    exist_keys.extend(keys)

    stopwords_file_path = "./stopwords.txt"
    stopwords = get_stopwords_set(stopwords_file_path)
    # Domain-specific filler words that would otherwise clutter the wordcloud.
    other = ["能力", "资格", "工作", "经验", "熟悉", "熟练", "要求", "优先", "具有", "相关",
             "年", "强", "良好", "应用", "基本", "以上", "以下", "以上学历", "一种"]
    stopwords.update(other)
    # Single digits carry no meaning in job requirements either.
    stopwords.update(str(i) for i in range(10))

    result = []
    for key in show_keys:
        requirement_file_path = "./output/detail_" + key + ".txt"
        requirement = get_requirement(requirement_file_path)
        word_frequency = segment_requirement(requirement, stopwords=stopwords)
        print(f"{key}  关键词提取>")
        print(jieba.analyse.extract_tags(requirement, withWeight=True))
        word_cloud = create_wordcloud(word_frequency)
        if key not in pre_existing:
            # Only freshly crawled keys get their image written to disk.
            word_cloud.to_file("./output/" + key + "wordcloud.jpg")
        result.append(word_cloud)

    # Show all generated wordclouds side by side in a single figure.
    if result:
        fig = plt.figure()
        for i, word_cloud in enumerate(result, start=1):
            ax = fig.add_subplot(1, len(result), i)
            ax.axis('off')
            ax.imshow(word_cloud)
        plt.show()