import requests
import json
import pandas as pd
import time
from pyecharts import options as opts
from pyecharts.charts import Map,Line,WordCloud,Pie
from pyecharts.globals import SymbolType
from pyecharts.faker import Faker

def liepin_data(用户输入的地区, 用户输入职位):
    """Scrape job listings from liepin.com and export them to Excel.

    Replays the JSON query that the liepin web front-end sends, walks every
    result page reported by the API, and writes all rows to
    ``<职位>_liepin_<月日_时分>.xlsx`` in the working directory.

    Args:
        用户输入的地区: City name; must be a key of the region-code table below.
        用户输入职位: Search keyword, e.g. '产品经理'.

    Returns:
        A summary string with the number of exported rows.

    Raises:
        ValueError: If the city has no known region code.
        requests.HTTPError: If the API responds with an error status.
    """
    url = "https://apic.liepin.com/api/com.liepin.searchfront4c.pc-search-job"
    # Liepin-internal region codes, captured from the site's own requests.
    地区编码字典 = {
        '广州': '050020',
        '深圳': '050090',
        '北京': '010',
        '珠海': '050140'
    }
    if 用户输入的地区 not in 地区编码字典:
        # The original raised a bare KeyError here; fail with a clear message.
        raise ValueError(f"未知地区: {用户输入的地区}，支持的地区: {list(地区编码字典)}")
    payload = {
        "data": {
            "mainSearchPcConditionForm": {
                "city": 地区编码字典[用户输入的地区],
                "dq": 地区编码字典[用户输入的地区],
                "pubTime": "",
                "currentPage": 0,
                "pageSize": 40,
                "key": 用户输入职位,
                "suggestTag": "",
                "workYearCode": "0",
                "compId": "",
                "compName": "",
                "compTag": "",
                "industry": "",
                "salary": "",
                "jobKind": "",
                "compScale": "",
                "compKind": "",
                "compStage": "",
                "eduLevel": ""
            },
            "passThroughForm": {
                "scene": "input",
                "skId": "",
                "fkId": "",
                "ckId": "oivpdy2t5a5mkw11b9ivw708y93lve6b",
                "suggest": None
            }
        }
    }

    # Browser-captured headers. NOTE(review): the Cookie / X-XSRF-TOKEN values
    # are a hardcoded personal session and will expire; refresh them from a
    # logged-in browser when requests start failing.
    # The original also pinned 'Content-Length: 412', which is wrong for any
    # payload of a different size — requests computes the correct value, so it
    # has been removed.
    headers = {
        'Accept': 'application/json, text/plain, */*',
        'Accept-Encoding': 'gzip, deflate, br',
        'Accept-Language': 'zh-CN,zh;q=0.9',
        'Cache-Control': 'no-cache',
        'Connection': 'keep-alive',
        'Content-Type': 'application/json;charset=UTF-8;',
        'Cookie': 'inited_user=766d20e9b912efc262b5c2f95513c048; __uuid=1687527818887.98; __gc_id=caf909ec2b4b42ae83c2d19c0e0a63ab; need_bind_tel=false; _ga=GA1.1.1338595735.1687527959; access_system=C; user_roles=0; XSRF-TOKEN=GlGgN0clTpKjxH1s7JiPSg; Hm_lvt_a2647413544f5a04f00da7eee0d5e200=1687527853,1687607983,1687611770,1687614429; __tlog=1687614429104.24%7C00000000%7C00000000%7Cs_o_001%7Cs_o_001; imApp_0=1; UniqueKey=e30b98cf1b27d3faa9dba665e9526a14; liepin_login_valid=0; lt_auth=6egPbn0EmQqv4CXcgTBesaxPjtOqWGjIoC9ZhUhWhN7tXKLh4P%2FmRAqGr7EE%2FioIqx5xc%2FozMLb2Mu7%2FzXVI40ca%2BlGkkIC0uuW52WEBTuJcN8W2vezHl8zRQpQcl0AC8nFbtkIL%2BQ%3D%3D; new_user=true; c_flag=0bb698688660989c0df7c9ccf4b89e18; user_photo=5f8fa3b979c7cc70efbf445908u.png; user_name=%E5%BA%84%E5%B0%8F%E5%A8%9C; imId=0a2e1e9963b92abb88c4bf85a6a1d035; imId_0=0a2e1e9963b92abb88c4bf85a6a1d035; imClientId=0a2e1e9963b92abba88eb36c289dcebc; imClientId_0=0a2e1e9963b92abba88eb36c289dcebc; inited_user=766d20e9b912efc262b5c2f95513c048; __uv_seq=35; __session_seq=18; Hm_lpvt_a2647413544f5a04f00da7eee0d5e200=1687615467; fe_im_socketSequence_new_0=11_11_11; fe_im_opened_pages=; fe_im_connectJson_0=%7B%220_e30b98cf1b27d3faa9dba665e9526a14%22%3A%7B%22socketConnect%22%3A%222%22%2C%22connectDomain%22%3A%22liepin.com%22%7D%7D; _ga_54YTJKWN86=GS1.1.1687614443.6.1.1687616386.0.0.0',
        'Host': 'apic.liepin.com',
        'Origin': 'https://www.liepin.com',
        'Pragma': 'no-cache',
        'Referer': 'https://www.liepin.com/',
        'sec-ch-ua': '"Google Chrome";v="111", "Not(A:Brand";v="8", "Chromium";v="111"',
        'sec-ch-ua-mobile': '?0',
        'sec-ch-ua-platform': '"Windows"',
        'Sec-Fetch-Dest': 'empty',
        'Sec-Fetch-Mode': 'cors',
        'Sec-Fetch-Site': 'same-site',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/105.0.0.0 Safari/537.36 Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0) Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0) Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50',
        'X-Client-Type': 'web',
        'X-Fscp-Bi-Stat': '{"location": "https://www.liepin.com/zhaopin/?inputFrom=www_index&workYearCode=0&key=%E4%BA%A7%E5%93%81%E7%BB%8F%E7%90%86&scene=input&ckId=htihov8m2frxgy6ywo2wsg2gncnydzlb&dq="}',
        'X-Fscp-Fe-Version': '',
        'X-Fscp-Std-Info': '{"client_id": "40108"}',
        'X-Fscp-Trace-Id': '5e878e98-67be-4743-82a0-8191362cb1e2',
        'X-Fscp-Version': '1.1',
        'X-Requested-With': 'XMLHttpRequest',
        'X-XSRF-TOKEN': 'm2dqFIn4Tbykc5zJxtb_4g'
    }

    # 1. First request only to learn how many result pages exist.
    r = requests.post(url, data=json.dumps(payload), headers=headers)
    r.raise_for_status()
    page = r.json()['data']['pagination']['totalPage']

    # 2. Fetch every page (the API is 0-indexed) and normalize each JSON
    #    card list into a flat DataFrame.
    response_df = []
    for i in range(page):
        payload['data']['mainSearchPcConditionForm']['currentPage'] = i
        r = requests.post(url, data=json.dumps(payload), headers=headers)
        r.raise_for_status()
        df = pd.json_normalize(r.json()['data']['data']['jobCardList'])
        response_df.append(df)

    # 3. Combine and export. Take a single localtime() snapshot so the
    #    month/day/hour/minute in the file name cannot straddle a rollover
    #    (the original called time.localtime() four separate times).
    df = pd.concat(response_df)
    t = time.localtime()
    output_time = f"{t.tm_mon}{t.tm_mday}_{t.tm_hour}{t.tm_min}"
    df.to_excel(f"{用户输入职位}_liepin_{output_time}.xlsx")
    # 4. Summary. The original returned an accidental 3-tuple
    #    ("当前数据已导出，数据量为：", len(df), "行") — a missing f-string.
    return f"当前数据已导出，数据量为：{len(df)}行"


def liepin_dq(用户输入的地区, 用户输入职位):
    """Build a pyecharts Map of job counts per district for one city.

    Reads the Excel file exported by ``liepin_data``. The timestamp in the
    file name is recomputed from the current clock, so this must run within
    the same minute as the export — TODO: pass the file name in explicitly.

    Args:
        用户输入的地区: City/map name used for the pyecharts map type and title.
        用户输入职位: Search keyword; part of the Excel file name.

    Returns:
        A configured ``pyecharts.charts.Map`` instance.
    """
    t = time.localtime()
    output_time = f"{t.tm_mon}{t.tm_mday}_{t.tm_hour}{t.tm_min}"
    df = pd.read_excel(f'{用户输入职位}_liepin_{output_time}.xlsx')
    # Count postings per district. Keep only '城市-区' style labels and pair
    # each district with ITS OWN count. The original filtered the names
    # (by '-') and sliced the counts ([1:]) independently, which misaligns
    # districts and counts whenever the dropped entries differ.
    counts = df['job.dq'].value_counts()
    data_pair = [[dq.split('-')[1], int(n)] for dq, n in counts.items() if '-' in dq]
    c = (
        Map()
            .add('职位数量', data_pair, 用户输入的地区)
            .set_global_opts(
            title_opts=opts.TitleOpts(title='Map-' + 用户输入的地区 + '地图'), visualmap_opts=opts.VisualMapOpts()
        )
    )
    return c

def liepin_cy(用户输入的地区, 公司产业):
    """Build a pyecharts Pie of the education requirements in the export.

    Reads the Excel file exported by ``liepin_data``. The timestamp in the
    file name is recomputed from the current clock, so this must run within
    the same minute as the export — TODO: pass the file name in explicitly.

    Args:
        用户输入的地区: Unused here; kept for a uniform signature with the
            sibling chart helpers.
        公司产业: Search keyword; part of the Excel file name.

    Returns:
        A configured ``pyecharts.charts.Pie`` instance.
    """
    t = time.localtime()
    output_time = f"{t.tm_mon}{t.tm_mday}_{t.tm_hour}{t.tm_min}"
    df = pd.read_excel(f'{公司产业}_liepin_{output_time}.xlsx')
    # Compute the distribution once. The original recomputed value_counts()
    # five times, three of them as no-op statements whose results were
    # discarded.
    edu_counts = df['job.requireEduLevel'].value_counts()
    data_pair = [list(z) for z in zip(edu_counts.index.tolist(),
                                      edu_counts.values.tolist())]
    w = (
        Pie(init_opts=opts.InitOpts(width="380px", height="280px"))
            .add("", data_pair)
            .set_colors(["red", "orange", "yellow"])
            .set_global_opts(title_opts=opts.TitleOpts(title="学历要求"))
            .set_series_opts(label_opts=opts.LabelOpts(formatter="{b}: {c}"))
    )
    return w

def liepin_dd(用户输入的地区, 公司产业):
    """Build a pyecharts WordCloud of company industries in the export.

    Reads the Excel file exported by ``liepin_data``. The timestamp in the
    file name is recomputed from the current clock, so this must run within
    the same minute as the export — TODO: pass the file name in explicitly.

    Args:
        用户输入的地区: Unused here; kept for a uniform signature with the
            sibling chart helpers.
        公司产业: Search keyword; part of the Excel file name.

    Returns:
        A configured ``pyecharts.charts.WordCloud`` instance.
    """
    t = time.localtime()
    output_time = f"{t.tm_mon}{t.tm_mday}_{t.tm_hour}{t.tm_min}"
    df = pd.read_excel(f'{公司产业}_liepin_{output_time}.xlsx')
    # Normalize industry labels: keep only the text before the first
    # '（', '/' or '(' so variants of the same industry collapse together,
    # then count occurrences.
    df_Industry = (
        df['comp.compIndustry']
        .apply(lambda x: str(x).split('（')[0].split('/')[0].split('(')[0])
        .value_counts()
    )
    names = df_Industry.index.tolist()
    counts = df_Industry.values.tolist()
    # Skip index 0 as the original did — presumably drops a dominant or
    # placeholder bucket (e.g. 'nan'); TODO confirm against real data.
    # The original also had three dead statements recomputing these lists.
    PM_title_words = list(zip(names[1:], counts[1:]))
    e = (
        WordCloud(init_opts=opts.InitOpts(width="400px", height="280px"))
            .add("", PM_title_words, word_size_range=[40, 100], shape=SymbolType.DIAMOND)
            .set_global_opts(title_opts=opts.TitleOpts(title="产业分布词云图"))
    )
    return e




