from testReptille.JobReptile import JobReptile
import pandas as pd
import jieba
import numpy
from wordcloud import WordCloud , ImageColorGenerator
from scipy.misc import imread
import matplotlib.pyplot as plt

def count_salary(keyword):
    """Print (salary, count) pairs from salary.txt, most frequent first.

    ``keyword`` is currently unused: the crawling step that consumed it is
    disabled, so this function only aggregates results previously scraped
    into salary.txt by JobReptile.
    """
    salary_counts = JobReptile.read_file("salary.txt")
    # Sort by occurrence count, descending.  (Renamed from `list` to avoid
    # shadowing the builtin.)
    ranked = sorted(salary_counts.items(), key=lambda item: item[1], reverse=True)
    for entry in ranked:
        print(entry)


def get_job_description(keyword):
    """Fetch the first 51job result page for *keyword* and store every job
    description linked from it (delegates scraping to JobReptile)."""
    base = "https://search.51job.com/list/230300,000000,0000,00,9,99,"
    query = (".html?lang=c&stype=&postchannel=0000&workyear=99&cotype=99"
             "&degreefrom=99&jobterm=99&"
             "companysize=99&providesalary=99&lonlat=0%2C0&radius=-1"
             "&ord_field=0&confirmdate=9&fromType=&dibiaoid=0&address="
             "&line=&specialarea=00&from=&welfare=")
    # Only page 1 is fetched; the page-loop was never enabled.
    page_url = base + keyword + ",2," + str(1) + query
    page_html = JobReptile.get_html(page_url, 'gbk')

    job_links = []
    job_links.extend(JobReptile.get_all_job_links(page_html))
    JobReptile.get_all_job_descripton(job_links)


def count_description_keywords():
    """Return the full text of description.txt (UTF-8).

    Despite the name, this function does no counting itself — it just loads
    the raw description text for the tokenize/count pipeline in __main__.
    """
    # newline="" hands the file's line endings through untranslated.
    # (The unused local `line` from the original was removed.)
    with open("description.txt", "r", encoding="utf-8", newline="") as f:
        return f.read()

def gen_word_cloud(words_stat):
    """Render, save (output.png) and display a word cloud.

    ``words_stat`` is expected to be a two-column frequency table with the
    word in the first column and its count in the second, as produced in
    ``__main__``.
    """
    print("-----gen word cloud----")
    print(words_stat)

    # scipy.misc.imread was removed in SciPy 1.2, so read the mask with
    # matplotlib instead.  plt.imread returns floats in [0, 1] for PNGs,
    # while WordCloud / ImageColorGenerator expect uint8 RGB, so convert.
    color_mask = plt.imread("bg.png")
    if color_mask.dtype != numpy.uint8:
        color_mask = (color_mask * 255).astype(numpy.uint8)

    wordcloud = WordCloud(font_path="simhei.ttf",   # font able to render Chinese
                          background_color="white",
                          max_words=100,            # cap of words displayed
                          mask=color_mask,          # shape taken from bg.png
                          max_font_size=100,
                          random_state=42,          # deterministic layout
                          # With a mask the saved image follows the mask's
                          # size; width/height only apply without one.
                          width=1000, height=860, margin=2,
                          )

    # Top-100 rows -> {word: count}.  The original built this dict and then
    # copied it key-by-key into a second identical dict; the copy was
    # redundant and is gone.
    word_frequence = {row[0]: row[1] for row in words_stat.head(100).values}
    wordcloud.generate_from_frequencies(word_frequence)

    # Recolor the words from the mask image's colors, then save and show.
    image_colors = ImageColorGenerator(color_mask)
    wordcloud.recolor(color_func=image_colors)
    wordcloud.to_file('output.png')
    plt.imshow(wordcloud)
    plt.axis("off")
    plt.show()


# Script entry point: tokenize the scraped job descriptions, strip stop
# words, count word frequencies, and render them as a word cloud.
if __name__ == "__main__":
    # The salary-statistics / crawling steps (count_salary,
    # get_job_description) are intentionally not invoked here; this run
    # only post-processes an existing description.txt.
    content = count_description_keywords()
    content_list = jieba.lcut(content)
    words_df = pd.DataFrame({"segment": content_list})

    # Remove Chinese stop words.
    stopwords = pd.read_csv("chineseStopWords.txt", index_col=False, quoting=3,
                            sep=" ", names=['stopword'], encoding="gbk")
    words_df = words_df[~words_df.segment.isin(stopwords.stopword)]

    # Word-frequency table.  The original used
    # ``groupby(...)['segment'].agg({"次数": numpy.size})`` — the
    # dict-renaming agg form that was removed in pandas 1.0.
    # value_counts yields the same (segment, 次数) table, already sorted
    # in descending count order, so the explicit sort is subsumed too.
    words_stat = (words_df['segment'].value_counts()
                  .rename_axis('segment')
                  .reset_index(name='次数'))

    # Generate the word cloud.
    gen_word_cloud(words_stat)