import ast
import json
import time

import numpy as np
import pandas as pd
import requests
from pyecharts import options as opts
from pyecharts.charts import Bar, Map, Pie, Scatter, WordCloud
from pyecharts.faker import Faker
from pyecharts.globals import SymbolType, ThemeType


# 获取源数据并导出为excel表格
def getData(area, job):
    url = "https://apic.liepin.com/api/com.liepin.searchfront4c.pc-search-job"
    地区编码字典 = {"广州": "050020", "深圳": "050090", "上海": "020", "北京": "010"}
    payload = {
        "data": {
            "mainSearchPcConditionForm": {
                "city": 地区编码字典[area],
                "dq": 地区编码字典[area],
                "pubTime": "",
                "currentPage": 0,
                "pageSize": 40,
                "key": job,
                "suggestTag": "",
                "workYearCode": "0",
                "compId": "",
                "compName": "",
                "compTag": "",
                "industry": "",
                "salary": "",
                "jobKind": "",
                "compScale": "",
                "compKind": "",
                "compStage": "",
                "eduLevel": "",
            },
            'passThroughForm':{
              "scene": "condition",
              "skId": "un5mbbc5heverai9y3iepgy89qb7mgs8",
              "fkId": "un5mbbc5heverai9y3iepgy89qb7mgs8",
              "ckId": "fkph9uo4f3cpp5btgn4qrnyfvfikb6o9",
              "suggest": None
        }
        }
    }
    headers = {
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
        "Accept-Encoding": "gzip, deflate, br",
        "Accept-Language": "zh-CN,zh;q=0.9",
        "Cache-Control": "no-cache",
        "Connection": "keep-alive",
        "Content-Length": "488",
        "Content-Type": "application/json;charset=UTF-8;",
        "Cookie": "inited_user=ca2cd52224a6e0b50fba8d5f435ce6ad; __uuid=1682499562694.76; __gc_id=99bb00cff58c4dc99e3ede30d707b239; _ga=GA1.1.2125610548.1682499578; need_bind_tel=false; new_user=false; c_flag=f955f8a6248606fc81c6e5c458164d91; imId=de0f5d1ae0b3ffb75a00348d1cd66d6b; imId_0=de0f5d1ae0b3ffb75a00348d1cd66d6b; imClientId=de0f5d1ae0b3ffb7d5a5cf7e26aec6bc; imClientId_0=de0f5d1ae0b3ffb7d5a5cf7e26aec6bc; XSRF-TOKEN=pk_u883_SU-a_p0ztiK1sg; __tlog=1687091721647.99%7C00000000%7C00000000%7Cs_o_007%7Cs_o_007; Hm_lvt_a2647413544f5a04f00da7eee0d5e200=1686039919,1686041969,1686199397,1687091722; acw_tc=276077b816870917216422338efb96aa2e3d0e36b2d3582094dc4bc1a2bd09; UniqueKey=3f75b05cd12921a941344312b1f2ca2b; liepin_login_valid=0; lt_auth=u%2BYCP3FRnFv8tyXfijRf4%2FtJiNOvUzqfpXgJ1x9UitS5CaDk4P%2FmRg2FrrIG%2BioIq0x8IvQzMLb2Muj3yHRP7kAV%2BFGnlZ6utf6k0HsCUeVlJMW2vezHg%2FXUQp0lnEAA8nJbpEIL%2BVzO; access_system=C; user_roles=0; user_photo=5f8fa3a78dbe6273dcf85e2608u.png; user_name=%E5%88%98%E7%90%AA%E6%B6%9B; inited_user=ca2cd52224a6e0b50fba8d5f435ce6ad; imApp_0=1; Hm_lpvt_a2647413544f5a04f00da7eee0d5e200=1687091767; __session_seq=4; __uv_seq=4; fe_im_socketSequence_new_0=2_1_2; fe_im_connectJson_0=%7B%220_3f75b05cd12921a941344312b1f2ca2b%22%3A%7B%22socketConnect%22%3A%223%22%2C%22connectDomain%22%3A%22liepin.com%22%7D%7D; fe_im_opened_pages=; _ga_54YTJKWN86=GS1.1.1687091721.4.1.1687091780.0.0.0",
        "Host": "apic.liepin.com",
        "Origin": "https://www.liepin.com",
        "Pragma": "no-cache",
        "Referer": "https://www.bing.com/",
        "sec-ch-ua": '"Not A;Brand";v="99", "Chromium";v="114", "Google Chrome";v="114"',
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": '"Windows"',
        "Sec-Fetch-Dest": "document",
        "Sec-Fetch-Mode": "navigate",
        "Sec-Fetch-Site": "cross-site",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
        "X-Client-Type": "web",
        "X-Fscp-Bi-Stat": '{"location": "https://www.liepin.com/zhaopin/?city=050020&dq=050020&pubTime=&currentPage=0&pageSize=40&key=%E5%89%8D%E7%AB%AF%E5%BC%80%E5%8F%91&suggestTag=&workYearCode=1&compId=&compName=&compTag=&industry=&salary=&jobKind=&compScale=&compKind=&compStage=&eduLevel=&otherCity=&sfrom=search_job_pc&ckId=6h7hsgd5mh8ik9abo2fwa90bwots99fa&scene=condition&skId=6h7hsgd5mh8ik9abo2fwa90bwots99fa&fkId=6h7hsgd5mh8ik9abo2fwa90bwots99fa&suggestId="}',
        "X-Fscp-Fe-Version": "",
        "X-Fscp-Std-Info": '{"client_id": "40108"}',
        "X-Fscp-Trace-Id": "59e152d8-d55c-4d21-b484-0628cad4097f",
        "X-Fscp-Version": "1.1",
        "X-Requested-With": "XMLHttpRequest",
        "X-XSRF-TOKEN": "pk_u883_SU-a_p0ztiK1sg",
    }
    r = requests.post(url, data=json.dumps(payload), headers=headers)
    response_data = r.json()
    page = response_data["data"]["pagination"]["totalPage"]

    response_df = []
    for i in range(page):
        payload["data"]["mainSearchPcConditionForm"]["currentPage"] = i
        r = requests.post(url, data=json.dumps(payload), headers=headers)
        response_data = r.json()
        df = pd.json_normalize(response_data["data"]["data"]["jobCardList"])
        response_df.append(df)

    df = pd.concat(response_df)
    key = payload["data"]["mainSearchPcConditionForm"]["key"]
    output_time = (
        str(time.localtime().tm_mon)
        + str(time.localtime().tm_mday)
        + "_"
        + str(time.localtime().tm_hour)
        + str(time.localtime().tm_min)
    )
    new_df = df[
        [
            "job.title",
            "job.salary",
            "job.dq",
            "comp.compName",
            "comp.compIndustry",
            "job.requireEduLevel",
            "job.requireWorkYears",
        ]
    ]
    new_df.columns = ["工作", "薪资", "地区", "公司名称", "公司类型", "学历要求", "工作经验"]
    new_df = new_df.reset_index(drop=True)
    df.to_excel(key + "_liepin_" + output_time + ".xlsx")
    # 返回key值，输出的时间以及表格
    return key, output_time, new_df


# 根据地区进行地图渲染
def areaData(job, area, output_time):
    """liepin数据地区分布数据分析及可视化"""
    df = pd.read_excel(job + "_liepin_" + output_time + ".xlsx")
    df_PM_gz = df[
        [
            "job.labels",
            "job.refreshTime",
            "job.title",
            "job.salary",
            "job.dq",
            "job.topJob",
            "job.requireWorkYears",
            "job.requireEduLevel",
            "comp.compStage",
            "comp.compName",
            "comp.compIndustry",
            "comp.compScale",
        ]
    ]
    地区 = [
        i.split("-")[1]
        for i in df_PM_gz["job.dq"].value_counts().index.tolist()
        if "-" in i
    ]
    岗位个数 = df_PM_gz["job.dq"].value_counts().values.tolist()[1:]
    c = (
        Map()
        .add(area, [list(z) for z in zip(地区, 岗位个数)], area)
        .set_global_opts(
            title_opts=opts.TitleOpts(title= area + "-" + job + "岗位分布地图"),
            visualmap_opts=opts.VisualMapOpts(),
        )
    )
    s = (
        Pie()
        .add("",[list(z) for z in zip(地区, 岗位个数)],radius=["40%", "75%"])
        .set_global_opts(
            title_opts=opts.TitleOpts(title=area + "-" + job + "岗位分布"),
            legend_opts=opts.LegendOpts(orient="vertical", pos_top="15%", pos_left="2%"),
        )
        .set_series_opts(label_opts=opts.LabelOpts(formatter="{b}: {c}"))
    )
    return c,s


# 技能词云封装
def skillData(job, area, output_time):
    df = pd.read_excel(job + "_liepin_" + output_time + ".xlsx")
    df_PM_gz = df[
        [
            "job.labels",
            "job.refreshTime",
            "job.title",
            "job.salary",
            "job.dq",
            "job.topJob",
            "job.requireWorkYears",
            "job.requireEduLevel",
            "comp.compStage",
            "comp.compName",
            "comp.compIndustry",
            "comp.compScale",
        ]
    ]
    df_PM_gz["job.labels"].apply(lambda x: eval(x)).tolist()
    PM_labels_list = [
        j for i in df_PM_gz["job.labels"].apply(lambda x: eval(x)).tolist() for j in i
    ]
    PM_labels_words = [(i, PM_labels_list.count(i)) for i in set(PM_labels_list)]
    c = (
        WordCloud()
        .add("", PM_labels_words, word_size_range=[20, 100], shape=SymbolType.DIAMOND,        textstyle_opts=opts.TextStyleOpts(font_family="cursive"),)
        .set_global_opts(
            title_opts=opts.TitleOpts(title=job + "-" + area + "地区-技能/产品 词云图")
        )
    )
    return c


# 岗位名称词云封装
def jobData(job, area, output_time):
    df = pd.read_excel(job + "_liepin_" + output_time + ".xlsx")
    df_PM_gz = df[
        [
            "job.labels",
            "job.refreshTime",
            "job.title",
            "job.salary",
            "job.dq",
            "job.topJob",
            "job.requireWorkYears",
            "job.requireEduLevel",
            "comp.compStage",
            "comp.compName",
            "comp.compIndustry",
            "comp.compScale",
        ]
    ]
    df_PM_gz["job.title"][df_PM_gz["job.title"].str.contains("（")].str.split("（").apply(
        lambda x: x[0]
    )
    df_job_title = (
        df_PM_gz["job.title"]
        .apply(lambda x: x.split("（")[0].split("/")[0].split("(")[0])
        .value_counts()
    )
    PM_title_words = [
        (df_job_title.index.tolist()[i], df_job_title.values.tolist()[i])
        for i in range(1, len(df_job_title.index.tolist()))
    ]
    c = (
        WordCloud()
        .add("", PM_title_words, word_size_range=[20, 100], shape=SymbolType.DIAMOND)
        .set_global_opts(
            title_opts=opts.TitleOpts(title=job + "-" + area + "地区-岗位职称 词云图")
        )
    )
    return c


# 薪资函数封装 其中包含柱状图  和 散点图
def salaryData(job, area, output_time):
    df = pd.read_excel(job + "_liepin_" + output_time + ".xlsx")
    df_PM_gz = df[
        [
            "job.labels",
            "job.refreshTime",
            "job.title",
            "job.salary",
            "job.dq",
            "job.topJob",
            "job.requireWorkYears",
            "job.requireEduLevel",
            "comp.compStage",
            "comp.compName",
            "comp.compIndustry",
            "comp.compScale",
        ]
    ]
    df_PM_gz = df_PM_gz.rename(
        columns={
            "job.labels": "职位标签",
            "job.refreshTime": "职位更新时间",
            "job.title": "职位",
            "job.salary": "薪资",
            "job.dq": "地区",
            "job.topJob": "是否top职位",
            "job.requireWorkYears": "工作年限",
            "job.requireEduLevel": "学历",
            "comp.compStage": "公司融资情况",
            "comp.compName": "公司名称",
            "comp.compIndustry": "行业",
            "comp.compScale": "规模",
        }
    )
    非薪资面议 = df_PM_gz[~df_PM_gz["薪资"].str.contains("面议|元/天")]
    非薪资面议_detail = 非薪资面议["薪资"].apply(lambda x: x.split("薪")[0].split("·")).tolist()
    平均薪资 = [
        (int(i[0].split("-")[0]) + int(i[0].split("-")[1].split("k")[0])) / 2
        if len(i) == 1
        else round(
            (int(i[0].split("-")[0]) + int(i[0].split("-")[1].split("k")[0]))
            / 2
            * int(i[1])
            / 12,
            1,
        )
        for i in 非薪资面议_detail
    ]
    非薪资面议["平均薪资"] = 平均薪资
    非薪资面议.drop(非薪资面议[非薪资面议["工作年限"].str.contains("-") == False].index, inplace=True)
    分地区_平均薪资 = 非薪资面议.groupby("地区").agg({"平均薪资": "median"})
    分地区_平均薪资_values = [round(i[0], 1) for i in 分地区_平均薪资.values.tolist()]
    分地区_平均薪资_index = 分地区_平均薪资.index.tolist()
    # 柱状图
    c = (
        Bar(init_opts=opts.InitOpts(theme=ThemeType.LIGHT))
        .add_xaxis([i.split("-")[1] for i in 分地区_平均薪资_index[1:]])
        .add_yaxis("地区", 分地区_平均薪资_values[1:])
        .set_global_opts(
            title_opts=opts.TitleOpts(title=job + "-" + area + "分地区-中位数薪资"),
            brush_opts=opts.BrushOpts(),
        )
    )

    df_year_edulevel = 非薪资面议.groupby(["工作年限", "学历"]).agg({"平均薪资": "mean"})
    year_list = ["1-3年", "3-5年", "5-10年"]
    edulevel_list = ["大专", "本科", "本科及以上", "统招本科"]
    result_dict = {}
    for year in year_list:
        for edulevel in edulevel_list:
            avg_salary = round(
                df_year_edulevel.loc[(slice(year, year), edulevel), "平均薪资"][0]
            )
            result_dict[f"{year}-{edulevel}"] = avg_salary
    result_list = list(result_dict.values())

    new_list = []
    for i in range(0, len(result_list), 3):
        new_list.append(result_list[i : i + 3])
    df_year_edulevel.values.tolist()
    arr = np.array(df_year_edulevel)
    min_val = round(arr.min())
    max_val = round(arr.max())
    s = (
        Scatter()
        .add_xaxis(year_list)
        .add_yaxis("大专", new_list[0])
        .add_yaxis("本科", new_list[1])
        .add_yaxis("本科及以上", new_list[2])
        .add_yaxis("统招本科", new_list[3])
        .set_global_opts(
            title_opts=opts.TitleOpts(title=job + "-" + area + "分学历&工作年限-中位数薪资"),
            visualmap_opts=opts.VisualMapOpts(type_="size", max_=max_val, min_=min_val),
        )
    )
    return c, s


def hangyeSalary(job, area, output_time):
    df = pd.read_excel(job + "_liepin_" + output_time + ".xlsx")
    df_PM_gz = df[
        [
            "job.labels",
            "job.refreshTime",
            "job.title",
            "job.salary",
            "job.dq",
            "job.topJob",
            "job.requireWorkYears",
            "job.requireEduLevel",
            "comp.compStage",
            "comp.compName",
            "comp.compIndustry",
            "comp.compScale",
        ]
    ]
    df_PM_gz = df_PM_gz.rename(
        columns={
            "job.labels": "职位标签",
            "job.refreshTime": "职位更新时间",
            "job.title": "职位",
            "job.salary": "薪资",
            "job.dq": "地区",
            "job.topJob": "是否top职位",
            "job.requireWorkYears": "工作年限",
            "job.requireEduLevel": "学历",
            "comp.compStage": "公司融资情况",
            "comp.compName": "公司名称",
            "comp.compIndustry": "行业",
            "comp.compScale": "规模",
        }
    )
    非薪资面议 = df_PM_gz[~df_PM_gz["薪资"].str.contains("面议|元/天")]
    非薪资面议_detail = 非薪资面议["薪资"].apply(lambda x: x.split("薪")[0].split("·")).tolist()
    平均薪资 = [
        (int(i[0].split("-")[0]) + int(i[0].split("-")[1].split("k")[0])) / 2
        if len(i) == 1
        else round(
            (int(i[0].split("-")[0]) + int(i[0].split("-")[1].split("k")[0]))
            / 2
            * int(i[1])
            / 12,
            1,
        )
        for i in 非薪资面议_detail
    ]
    非薪资面议["平均薪资"] = 平均薪资

    df_industry = 非薪资面议.groupby('行业').agg({'平均薪资':'mean'})
    df_sorted = df_industry.sort_values(by='平均薪资', ascending=False)
    top_6 = df_sorted.head(6)  # 取前10个
    bottom_6 = df_sorted.tail(6)  # 取后10个
    df_sorted = pd.concat([top_6, bottom_6])
    行业 = df_sorted.index.tolist()
    薪资 = df_sorted.values.tolist()
    arr = []
    for i in range(len(行业)):
        d = {'industry': 行业[i], 'salary': 薪资[i]}
        arr.append(d)
    newarr = []
    for item in arr:
        salary = round(item['salary'][0])
        newarr.append([salary, item['industry']])
        
    c = (
        Bar()
        .add_dataset(
            newarr
        )
        .add_yaxis(
            series_name="",
            y_axis=[],
            encode={"x": "amount", "y": "product"},
            label_opts=opts.LabelOpts(is_show=False),
        )
        .set_global_opts(
            title_opts=opts.TitleOpts(title=area+"_"+job + " 岗位行业薪资排序【选取最高6个和最低6个】"),
            xaxis_opts=opts.AxisOpts(name="amount"),
            yaxis_opts=opts.AxisOpts(type_="category"),
            visualmap_opts=opts.VisualMapOpts(
                orient="horizontal",
                pos_left="center",
                min_=10,
                max_=100,
                range_text=["High Salary", "Low Salary"],
                dimension=0,
                range_color=["#80D0C7", "#0093E9"],
            ),
        )
    )
    return c
