from bs4 import BeautifulSoup
from urllib import request
from bs4 import UnicodeDammit

from io import BytesIO
import gzip
import re
import json

# Scrape 51job (前程无忧) search results for the keyword "java".

# TODO: verify/document the structure of the data source being parsed.

file = open("Q:/shilu/data/jobs.csv", "a")

# url = "https://search.51job.com/list/000000,000000,0000,00,9,99,java,2,5.html?lang=c&postchannel=0000&workyear=99&cotype=99&degreefrom=99&jobterm=99&companysize=99&ord_field=0&dibiaoid=0&line=&welfare="
# Template for 51job search-result pages (keyword "java"); the page number is
# the last comma-separated segment of the path.
startUrl = "https://search.51job.com/list/000000,000000,0000,00,9,99,java,2,{0}.html"
# Pre-build the URLs for pages 1..999 (upper bound is exclusive).
urlList = [startUrl.format(i) for i in range(1, 1000)]

# The page embeds its search results as:  window.__SEARCH_RESULT__ = {...}</script>
# Compile the pattern once outside the loop; the dots in the marker are escaped
# so they only match a literal '.' (the original pattern matched any character).
_RESULT_RE = re.compile(r"window\.__SEARCH_RESULT__ = (.+?)</script>")

for url in urlList:
    print("准备下载 {0}".format(url))
    try:
        # Browser-like User-Agent plus a captured session Cookie; without them
        # the site serves an anti-bot page instead of the search results.
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows; U; Windows NT 6.0 x64; en-US; rv:1.9pre) Gecko/2008072421 Minefield/3.0.2pre",
            "Cookie": "_uab_collina=164784329172586387706753; acw_tc=2f624a4316478432896694041e2fed7de42d820880669bd80e5c7d752a162b; acw_sc__v2=623817d9bc19ef24324f5f88c6f986f360a4e77e; guid=e2a9e3f2a02d3d523ff83dd2055c0710; nsearch=jobarea%3D%26%7C%26ord_field%3D%26%7C%26recentSearch0%3D%26%7C%26recentSearch1%3D%26%7C%26recentSearch2%3D%26%7C%26recentSearch3%3D%26%7C%26recentSearch4%3D%26%7C%26collapse_expansion%3D; search=jobarea%7E%60000000%7C%21ord_field%7E%600%7C%21recentSearch0%7E%60000000%A1%FB%A1%FA000000%A1%FB%A1%FA0000%A1%FB%A1%FA00%A1%FB%A1%FA99%A1%FB%A1%FA%A1%FB%A1%FA99%A1%FB%A1%FA99%A1%FB%A1%FA99%A1%FB%A1%FA99%A1%FB%A1%FA9%A1%FB%A1%FA99%A1%FB%A1%FA%A1%FB%A1%FA0%A1%FB%A1%FAjava%A1%FB%A1%FA2%A1%FB%A1%FA1%7C%21; ssxmod_itna=Yqjx27eQuD0DBnDzxA2YTW0=NfbTDmqc0DKi=N7DlOC3xA5D8D6DQeGTbnWrbtqtxru3h30eRrdY=8pjOYrL8tg4GLDmKDy7Wr4GG0xBYDQxAYDGDDP2DzL=GuD0KGRD048DFxAtgDFqG0n4=qD0px3jhcD73DUqdDQqDSFWKDxGQSDitnDGYSD0tjD7QS7CNDeqI=kqGWFLxD0Lx8k0IKSnjFkPN5wmbdumudcediymITbQ2FH26Bfq=DbbLrdC6vvwUFVh+aKG4oI7+T8rxqz7mKWGG06iqPtrz4Gi4PtNe=KYmKzg4TUiqDDWKAxVuz4D; ssxmod_itna2=Yqjx27eQuD0DBnDzxA2YTW0=NfbTDmqc0DKi=ND6E87x0yuEx03E=nKqKvpXSBKAhbUKetZ45GFtpMm+mnWfo5hH3LElrt=r2bGLHqhfS1G+Rd1eForpp7MzQkfX5WFo8pG=n6oUvi8LyAghsR4jUST3RQQxE9R0xmLKq8Bp4mb58j4tjUTRSFniPnMrqG8DR0+YmIWTnSjoj8bRj9nrrLoH=xZvMnf0sF3RHfqQqLL9HDwo84gpOrLdGrwWElPBj2F7GyZ5EjKTU=W=hHSknP/F8Wc3gFNd2yj6cqVeR9rmL3afISf=c2HQxr4wGbhYK83SxOHrqdohtW8Klh8453WqSh33lYFlOte+nDGeD1cD8Q2=kxGkAHKCKhuG3G4GhQSYxVAo=Crkl5krvkErVDOLqi2LiLqW4Tp4w3duvYcK3pT4Lo+qQretL2DNiqqB3PQ4tW3=jzM2G=L7hewwcGEnpbhvtk3Wmr6p6CeUKCi2pzPujtThL8Ou6z3CiqUOoWPD7Qkz+ZEGd/Rkmw1l7AEG1=gYljTDGcDG7Hfxp7558LC1D4D="}
        req = request.Request(url=url, headers=headers)
        # Close the HTTP response deterministically (the original leaked it).
        # The site serves GBK-encoded pages.
        with request.urlopen(req) as resp:
            data = resp.read().decode(encoding="gbk")
        print("内容的字节数是 {0}".format(len(data)))

        # Extract the embedded JSON payload. Guard against pages without the
        # marker — previously result[0] raised IndexError, which was silently
        # swallowed by the broad except below.
        matches = _RESULT_RE.findall(data)
        if not matches:
            print("未找到 __SEARCH_RESULT__，跳过 {0}".format(url))
            continue
        # Parse the JSON string into a dict; the job rows live under 'engine_jds'.
        resultJson = json.loads(matches[0])
        jobList = resultJson['engine_jds']
        for job in jobList:
            # Fields persisted per job, in the original column order.
            # NOTE(review): assumes all values are strings and contain no tabs
            # or newlines — TODO confirm against the live payload.
            fields = [
                job['job_name'],
                job['company_name'],
                job['company_href'],
                job['providesalary_text'],
                job['workarea_text'],
                job['companytype_text'],
                job['jobwelf'],
                job['companysize_text'],
                job['companyind_text'],
                job['jobid'],
                job['coid'],
            ]
            # Same output layout as before: tab-separated, newline-terminated.
            line = "\t".join(fields) + "\n"
            file.write(line)
            print(line)

    except Exception as err:
        # Best-effort scraping: log the failure for this page and move on to
        # the next URL rather than aborting the whole run.
        print(err)
