from collections import Counter

import jieba
import pandas as pd
from pyecharts import options as opts
from pyecharts.charts import WordCloud
from pyecharts.globals import SymbolType


def get_pic(file_name, job_info_list: list, paiming):
    """Tokenize job descriptions, count skill words, export counts to Excel
    and render a word-cloud HTML page.

    :param file_name: job-title name, used in output file names and the chart title
    :param job_info_list: list of raw job-description strings
    :param paiming: ranking position of this job title, used in output file names
    :return: None (writes .xlsx and .html files as side effects)
    """
    # Segment every description with jieba and strip stop words.
    all_words = []
    for info in job_info_list:
        all_words += movestopwords(jieba.cut(info))

    # Counter replaces the original O(n^2) loop (list.count per unique word).
    # Counter keeps first-occurrence insertion order, so the resulting
    # (word, count) order is identical to the manual version.
    res = [(word, count) for word, count in Counter(all_words).items() if count >= 5]

    # Persist the frequency table. Raw f-strings avoid the invalid "\p"/"\j"
    # escape sequences of the original Windows path literals (same bytes).
    to_excel_dict = {
        "work": [word for word, _ in res],
        "count": [count for _, count in res],
    }
    pd.DataFrame(to_excel_dict).to_excel(
        rf"D:\python_work\jobAnalysis\技能分析可视化词云图{paiming}.{file_name}.xlsx",
        index=False,
    )

    # Render the word cloud (unused variable `c` from the original removed).
    (
        WordCloud()
        .add("", res, word_size_range=[10, 150], shape=SymbolType.DIAMOND)
        .set_global_opts(title_opts=opts.TitleOpts(title=f"{file_name}岗位技能需求词云图"))
        .render(rf"D:\python_work\jobAnalysis\技能分析可视化词云图{paiming}.{file_name}.html")
    )
    print(file_name, "已经ok")


def query_data(word, method):
    """Return *word* unchanged when the job title mentions "数据", else None.

    Non-matching titles map to None so the caller's dropna() discards them.
    `method` is unused but kept: callers pass it via DataFrame.apply(..., method=...).
    """
    return word if "数据" in word else None


# 创建停用词list
# Build the stop-word list
def stopwordslist(filepath):
    """Load a stop-word list: one word per line, stripped of surrounding whitespace.

    :param filepath: path to a UTF-8 encoded stop-word file
    :return: list of stop words in file order
    """
    # `with` guarantees the handle is closed (the original leaked it);
    # iterating the file directly avoids materializing readlines().
    with open(filepath, 'r', encoding='utf-8') as f:
        return [line.strip() for line in f]


stopwords = stopwordslist('../cn_stopwords.txt')  # path of the stop-word file loaded at import time


def movestopwords(sentence: list) -> list:
    """Filter stop words out of *sentence*, preserving token order.

    :param sentence: iterable of tokens (e.g. the generator from jieba.cut)
    :return: list of tokens that are not in the module-level stop-word list
    """
    # Build a set once: O(1) membership tests instead of an O(m) scan of the
    # stop-word *list* for every single token.
    stop = set(stopwords)
    return [token for token in sentence if token not in stop]


def get_ranking(data):
    """
    Rank job titles by posting count and generate a word cloud for the top 15.

    :param data: DataFrame with at least "职位" and "job_info" columns
    :return: None (get_pic writes files as side effects)
    """
    # Counter replaces the hand-rolled counting dict; the original also
    # shadowed the builtin `len` with a local variable.
    title_counts = Counter(data["职位"].to_list())

    # most_common(15) is documented as equivalent to
    # sorted(items, key=count, reverse=True)[:15] — stable, so ties keep
    # first-occurrence order exactly like the original sorted() call.
    top15 = title_counts.most_common(15)
    name_list = [name for name, _ in top15]
    value_list = [count for _, count in top15]

    # Ranking table. NOTE(review): built but never persisted in the original
    # ("用作保存" — intended for saving); TODO confirm whether it should be
    # written to disk.
    work_name_len_data = pd.DataFrame({"job_name": name_list, "count": value_list})

    # Generate one word cloud per top-ranked job title (rank is 1-based).
    for rank, name in enumerate(name_list, start=1):
        queried_data = data.query(f'职位=="{name}"')
        get_pic(name, queried_data["job_info"].to_list(), rank)


def init_data():
    """
    Load the cleaned job data, keep only data-related postings, and start ranking.

    :return: None
    """
    # Raw string avoids the invalid "\d"/"\j" escape sequences of the original
    # Windows path literal (identical runtime value).
    data = pd.read_excel(r"D:\python_work\jobAnalysis\数据\job_clean.xlsx")
    # query_data maps non-"数据" titles to None so dropna can discard them.
    data["职位"] = data["职位名称"].apply(query_data, method="none")
    # One dropna over both columns is equivalent to the original two
    # sequential dropna calls.
    data = data.dropna(subset=["职位", "job_info"])
    get_ranking(data)


# Script entry point: load the cleaned data and generate ranking + word clouds.
if __name__ == '__main__':
    init_data()