import urllib.request as ur
import re
import json
import lxml.etree as le
import xpath_tool
import pymysql

def parse1(keyword, page):
    """Fetch one page of 51job search results for *keyword*.

    Returns the list of job entries found in the page's embedded
    ``window.__SEARCH_RESULT__`` JSON (key ``engine_search_result``).
    Raises IndexError if the result payload is not found in the page.
    """
    # BUG FIX: the original template used literal "[keyword]"/"[page]",
    # which .format() never substitutes — every request hit the same bad
    # URL. Use real {} placeholders.
    url = (
        "https://search.51job.com/list/010000,000000,0000,00,9,99,"
        "{},2,{}.html"
    ).format(keyword, page)

    req = ur.Request(
        url=url,
        headers={
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36"
        }
    )

    # The site serves GBK; ignore undecodable bytes rather than crash.
    content = ur.urlopen(req).read().decode("gbk", "ignore")

    # re.S so the JSON still matches if the payload spans multiple lines.
    data = json.loads(
        re.findall(r"window.__SEARCH_RESULT__ =(.*?)</script>", content, re.S)[0]
    )
    return data["engine_search_result"]

def parse2(url):
    """Fetch a job-detail page and extract its three text sections.

    Returns a dict with keys ``parse2_job_detail``, ``parse2_job_conn``
    and ``parse2_job_company`` (each a string, or None if extraction
    failed). The key names match what ``spider`` reads — the original
    returned ``*_p``-suffixed keys, which made the caller raise KeyError.
    """
    req = ur.Request(
        url=url,
        headers={
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36"
        }
    )
    contentb = ur.urlopen(req).read()
    contentx = le.HTML(contentb)

    # Pre-initialize so a swallowed XPathEvalError cannot leave these
    # names unbound (the original then crashed with NameError on return).
    parse2_job_detail = None
    parse2_job_conn = None
    parse2_job_company = None
    try:
        # BUG FIX: the original predicates were missing the opening quote
        # in @class="tBorderTOP_box", so every evaluation raised
        # XPathEvalError and no data was ever extracted.
        parse2_job_detail = xpath_tool.xpath_union(
            contents=contentx,
            path='//div[@class="tCompany_main"]/div[@class="tBorderTOP_box"][1]//text()',
            split="", default=None)
        parse2_job_conn = xpath_tool.xpath_union(
            contents=contentx,
            path='//div[@class="tCompany_main"]/div[@class="tBorderTOP_box"][2]//text()',
            split="", default=None)
        parse2_job_company = xpath_tool.xpath_union(
            contents=contentx,
            path='//div[@class="tCompany_main"]/div[@class="tBorderTOP_box"][3]//text()',
            split="", default=None)
    except le.XPathEvalError:
        # Best-effort: a malformed expression leaves the defaults (None).
        pass
    return dict(
        parse2_job_detail=parse2_job_detail,
        parse2_job_conn=parse2_job_conn,
        parse2_job_company=parse2_job_company,
    )
def spider(keyword, start_page=1, end_page=100):
    """Crawl result pages [start_page, end_page] for *keyword* and persist each job.

    For every job entry from the search listing, fetches its detail page,
    merges the detail fields into the listing record, and inserts the
    combined record into MySQL via the module-level ``cs``/``conn``.
    """
    for page in range(start_page, end_page + 1):
        parse1_datas = parse1(keyword=keyword, page=page)
        for parse1_data in parse1_datas:
            job_href = parse1_data["job_href"]
            parse2_data = parse2(url=job_href)
            parse1_data["parse2_job_detail"] = parse2_data["parse2_job_detail"]
            parse1_data["parse2_job_conn"] = parse2_data["parse2_job_conn"]
            parse1_data["parse2_job_company"] = parse2_data["parse2_job_company"]
            # BUG FIX: the original statement was not valid SQL and never
            # interpolated the Python dict. Use a parameterized insert of
            # the JSON-serialized record (prevents SQL injection too).
            # NOTE(review): assumes a table spider_data with a `data`
            # column in database spider_51job — confirm the real schema.
            cs.execute(
                "insert into spider_data (data) values (%s)",
                (json.dumps(parse1_data, ensure_ascii=False),),
            )
            conn.commit()

if __name__ == "__main__":
    keyword = input("请输入检索关键词：")
    start_page = str(input("请输入检索起始页面："))
    end_page = str(input("请输入检索结束页面:"))
    spider(keyword=keyword,start_page=int(start_page),end_page=int(end_page))
    conn = pymysql.Connect(
        host = "localhost",
        port = 3306,
        user = "root",
        password = "123456",
        database = "spider_51job",
        charset = "utf8"
    )
    cs = conn.cursor()
    spider(keyword=keyword, start_page=int(start_page), end_page=int(end_page))