import json
import os
import random
import time
import re
import requests  # HTTP requests
from bs4 import BeautifulSoup
from urllib import parse

# Request-header / proxy-IP pools; one entry is picked at random per run
# to make the crawler look less like a bot.
# NOTE: the original file had module-level `global` statements here — they
# are no-ops at module scope and have been removed.
user_agents = [
    # NOTE(review): the original list had a truncated UA fragment that
    # implicitly concatenated with the next string literal, merging two
    # entries into one malformed UA; the fragment has been dropped.
    "Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
    "Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)",
    "Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
    "Opera/9.80 (Windows NT 6.1; U; en) Presto/2.8.131 Version/11.11",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SE 2.X MetaSr 1.0; SE 2.X MetaSr 1.0; .NET CLR 2.0.50727; SE 2.X MetaSr 1.0)",
    "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0",
    "Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
    "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; AcooBrowser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Acoo Browser; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; .NET CLR 3.0.04506)",
    "Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.5; AOLBuild 4337.35; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
    "Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT 9.0; en-US)",
    "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)",
    "Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)",
    "Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)",
    "Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6",
    "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1",
    "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; LBBROWSER)",
    "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E; LBBROWSER)",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.84 Safari/535.11 LBBROWSER",
    "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SV1; QQDownload 732; .NET4.0C; .NET4.0E; 360SE)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E)",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36",
]
# NOTE(review): these proxy IPs look stale; verify they are alive before
# relying on them.
proxy_list = ["218.91.13.2:46332",
              "121.31.176.85:8123",
              "218.71.161.56:80",
              "49.85.1.230:28643",
              "115.221.121.165:41674",
              "123.55.177.237:808"
              ]
# requests maps proxies by URL scheme; the original {'Proxies': ip} key is
# never matched, so no proxy was ever used. Use proper scheme keys.
_proxy_ip = random.choice(proxy_list)
proxy = {'http': 'http://' + _proxy_ip, 'https': 'http://' + _proxy_ip}
header = {'User-Agent': random.choice(user_agents)}


def getAPage(url, encode="gbk"):
    """
    Fetch a web page and return its decoded body.

    :param url: page URL to request
    :param encode: encoding used to decode the response body
                   (51job pages are GBK-encoded)
    :return: decoded HTML string, or "" when the request fails or the
             server does not answer with HTTP 200
    """
    # Crude rate limiting so we do not hammer the server.
    time.sleep(3)
    html = ""
    try:
        response = requests.get(url, headers=header, proxies=proxy)
        if response.status_code == 200:
            html = response.content.decode(encode)
    except Exception as e:
        # The original checked e.code / e.reason, which requests
        # exceptions do not carry — failures were silently swallowed.
        # Print the exception itself instead.
        print("request for {} failed: {}".format(url, e))
    return html


def bs4html(html):
    """
    Load *html* into a BeautifulSoup document tree.

    :param html: HTML markup (string or file-like object)
    :return: BeautifulSoup object parsed with the lxml parser
    """
    return BeautifulSoup(html, 'lxml')


def readFile(filePath):
    """
    Read a whole text file into a string.

    :param filePath: path of the file to read
    :return: the file's entire contents
    """
    # Context manager guarantees the handle is closed even if read() raises;
    # the original open/read/close leaked the handle on error.
    with open(filePath, 'r') as file_obj:
        return file_obj.read()


def save_file(dir, filename, content):
    """
    Save *content* to <dir>/<filename>.txt encoded as UTF-8.

    :param dir: target directory, created if it does not exist
    :param filename: base file name (``.txt`` is appended)
    :param content: text to write
    """
    # exist_ok=True avoids the race between the exists() check and
    # makedirs() that the original exists-then-create pattern had.
    os.makedirs(dir, exist_ok=True)
    save_dir = dir + '/' + filename + '.txt'
    # Windows defaults new files to gbk; force utf-8 explicitly.
    with open(save_dir, 'w', encoding='utf-8') as f:
        f.write(content)
    print('write has ok.....')


def getSearchPageData(url):
    """
    Extract the embedded search-result JSON from a 51job search page.

    The page stores its data in a ``window.__SEARCH_RESULT__ = {...}``
    assignment inside a <script> tag; this parses it out as a dict.

    :param url: search page URL — currently IGNORED: a saved local
                snapshot is parsed instead (restore the getAPage call
                below to go live)
    :return: parsed JSON dict, or {} when the marker is not found
    """
    # html = getAPage(url, "gbk")

    # NOTE(review): hardcoded debug snapshot; the url argument is unused.
    # The with-block fixes the file-handle leak in the original.
    with open(r"/51job/searchPage.html", "rb") as html:
        soup = bs4html(html)
    jsonData = {}
    for script in soup.find_all("script", type="text/javascript"):
        search_result = re.search("window.__SEARCH_RESULT__ = (.*)</script>", str(script))
        if search_result is not None:
            jsonData = json.loads(search_result[1])

    return jsonData


def getDetailedPageData(jsonData, searchWord="java"):
    """
    Scrape the detail page of every job in the search-result JSON.

    :param jsonData: dict parsed from ``window.__SEARCH_RESULT__``; holds
                     the "top_ads", "auction_ads", "market_ads" and
                     "engine_search_result" job lists
    :param searchWord: keyword the search was made with; stored with each
                       record for later analysis
    :return: list of dicts, one per scraped job posting
    """
    data = []

    top_ads = jsonData["top_ads"]
    auction_ads = jsonData["auction_ads"]
    market_ads = jsonData["market_ads"]
    engine_search_result = jsonData["engine_search_result"]

    for ad in top_ads:
        url = ad["job_href"]
        html = getAPage(url, "gbk")
        soup = bs4html(html)

        # Main info column on the left of the detail page.
        mainInfo = soup.find("div", class_="tCompany_main")

        # Job function category / job keywords (defaults when absent).
        wkName, jobKeywd = "Not specified", "Not specified"
        # Free-text job description, accumulated paragraph by paragraph.
        jobInfo = ""
        for p in mainInfo.find("div", class_="bmsg job_msg inbox").find_all("p"):
            p = p.text
            if "职能类别：" in p:
                wkName = p[5:]
            if "关键字：" in p:
                jobKeywd = p[4:]
            else:
                jobInfo += (p + "\n")

        # Contact information paragraph.
        contactWay = mainInfo.find("div", class_="bmsg inbox").find("p", class_="fp").text

        # BUG FIX: the original rebound `data` to a dict on every
        # iteration, so only the last job survived. Append instead —
        # `data` was clearly meant to be a list of records.
        data.append({
            "searchWord": searchWord, "job_name": ad["job_name"],
            "providesalary": ad["providesalary_text"], "company_name": ad["company_name"],
            "companytype": ad["companytype_text"], "companysize": ad["companysize_text"],
            "companyind": ad["companyind_text"], "workarea": ad["workarea_text"],
            "attribute": ad["attribute_text"], "updatedate": ad["updatedate"],
            "jobwelf": ad["jobwelf_list"], "wkName": wkName, "jobKeywd": jobKeywd,
            "contactWay": contactWay, "jobInfo": jobInfo, "detailUrl": url,
        })

    for ad in auction_ads:
        # TODO: scrape detail pages for auction ads (reuse the loop above).
        pass

    for ad in market_ads:
        # TODO: scrape detail pages for market ads.
        pass

    for ad in engine_search_result:
        # TODO: scrape detail pages for organic search results.
        pass

    return data


# def
def pullAllData(searchWord, maxPage=5, city="040000"):
    """
    Crawl the 51job search pages for *searchWord* and scrape every job.

    :param searchWord: keyword to search for; Chinese is fine — 51job
                       expects it double-url-quoted inside the URL path
    :param maxPage: exclusive upper bound — pages 1..maxPage-1 are fetched
    :param city: 51job area code (default 040000 = Shenzhen;
                 030200 = Guangzhou)
    :return: list with one entry per page, each the data scraped from
             that page's jobs
    """
    # 51job double-encodes the keyword inside the URL path.
    searchWordQuote = parse.quote(parse.quote(searchWord))
    pagePool = []
    allData = []
    baseUrl = 'https://search.51job.com'
    for page in range(1, maxPage):
        url = f'{baseUrl}/list/{city},000000,0000,01,9,99,{searchWordQuote},2,{page}.html'
        print(url)
        pagePool.append(getSearchPageData(url))
    # BUG FIX: the original iterated each page dict with `for url in
    # urlList`, which walks the dict's *keys* and handed bare strings to
    # getDetailedPageData — that function indexes jsonData["top_ads"] and
    # would crash. Pass the parsed JSON dict for the page directly.
    for pageNo, jsonData in enumerate(pagePool, start=1):
        print("正进行第{}页".format(pageNo))
        allData.append(getDetailedPageData(jsonData, searchWord))
    return allData


if __name__ == '__main__':
    # Ad-hoc smoke test: parse the saved search-page snapshot and dump the
    # organic search results. The full pipeline (pullAllData + save_file)
    # is still disabled while the parsers are being developed.
    data = getSearchPageData("kk")
    # NOTE(review): the original assigned data["top_ads"] and then
    # immediately overwrote it with "engine_search_result"; the dead
    # assignment has been removed.
    for job in data["engine_search_result"]:
        print(job["job_name"])
        print(job["company_name"])
        print(job["providesalary_text"])
        print(job["workarea_text"])
        print(job["updatedate"])
        print(job["companytype_text"])
        print(job["jobwelf_list"])
        print(job["attribute_text"])
        print(job["companysize_text"])
        print(job["companyind_text"])
