# 1.通过urllib模块模拟浏览器访问URL地址，拿到浏览器响应回来的数据，即整个HTML页面
import urllib.request as ur
import re
import json
import lxml.etree as le
# 2.定义一个URL
# url = 'https://search.51job.com/list/200600%252c010000,000000,0000,00,9,99,Python,2,4.html?lang=c&postchannel=0000&workyear=99&cotype=99&degreefrom=99&jobterm=99&companysize=99&ord_field=0&dibiaoid=0&line=&welfare='
#
# # 3.建立一个Request对象
# req = ur.Request(
#     url = url,
#     headers = {
#         'User-Agent':'Mozilla/5.0.html (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.html.2171.71 Safari/537.36',
#     }
# )
#
# # 4.定义返回的网页内容content
# content = ur.urlopen(req).read().decode('gbk','ignore')
# print(content)
#
# # 5.把网页源码保存到本地，看一下是否有前端渲染（查看是否与在网页中检查看到的源码一致）
# with open('51job1.html','w',encoding='gbk') as f:
#     f.write(content)
#
# # 6.导入re模块，使用正则表达式提取出我们需要的招聘信息（第一级的内容）
# ret = re.findall('window.__SEARCH_RESULT__ =(.*?)</script>',content)    # 这里我们取出来的是一个字符串类型，然后需要转换为json-dict类型
# print(ret)
#
# # 7.导入json模块，使用loads命令把json字符串对象转换为python列表+字典对象
# data = json.loads(ret[0])
# print(data)
#
# # 8.提取我们需要的工作信息
# results = data['engine_search_result']
# for result in results:
#     print(result)

# 到这我们需要的内容基本完成，下面我们把以上代码规范化，构造一个函数，使我们输入关键字和页面就可以打印出结果

# 9.构造一个函数,能够根据关键字和页面构建一个URL地址，并建立对应的Request对象
def parse1(keyword, page):
    """Fetch one page of 51job search results for *keyword*.

    Builds the search URL for the given keyword and page number, downloads
    the page, extracts the JSON blob the site embeds as
    ``window.__SEARCH_RESULT__ = {...}`` and returns the list of job entries.

    :param keyword: search keyword (e.g. ``'Python'``), interpolated into the URL
    :param page: 1-based result-page number
    :return: list of job dicts taken from ``data['engine_search_result']``
    :raises ValueError: if the expected JSON blob is not found in the page
        (e.g. the site changed its layout or blocked the request)
    """
    url = 'https://search.51job.com/list/200600%252c010000,000000,0000,00,9,99,{keyword},2,{page}.html?lang=c&postchannel=0000&workyear=99&cotype=99&degreefrom=99&jobterm=99&companysize=99&ord_field=0&dibiaoid=0&line=&welfare='.format(
        keyword=keyword,
        page=page,
    )

    req = ur.Request(
        url=url,
        headers={
            # Pretend to be a desktop browser so the site serves the full page.
            'User-Agent': 'Mozilla/5.0.html (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.html.2171.71 Safari/537.36',
        }
    )

    # 51job serves GBK-encoded pages; ignore undecodable bytes.
    content = ur.urlopen(req).read().decode('gbk', 'ignore')

    # Raw string with escaped dots so the marker matches literally;
    # re.S lets the JSON blob span multiple lines if the site reformats it.
    matches = re.findall(r'window\.__SEARCH_RESULT__ =(.*?)</script>', content, re.S)
    if not matches:
        raise ValueError('__SEARCH_RESULT__ JSON not found in page; site layout may have changed')
    data = json.loads(matches[0])   # JSON string -> Python dict
    return data['engine_search_result']

# 11.把上面的代码注释掉，只看构造的函数
# if __name__ == '__main__':
    # print(parse1(keyword='java',page=1))    # 这样一级页面就拿到了


# 12.提取二级页面，先保存到本地
#     contentb = ur.urlopen(url='https://jobs.51job.com/beijing-sjsq/125867216.html?s=01&t=0').read()
#     with open('51job2.html','wb') as f:
#         f.write(contentb)

# 13.对二级页面进行xpath提取，导入lxml.etree包,然后把HTML对象转换为LXML对象
# 13. Fetch a second-level (job-detail) page as raw bytes.
content2 = ur.urlopen(url='https://jobs.51job.com/beijing-sjsq/125867216.html?s=01&t=0').read()

# 14. Parse the HTML with lxml and pull every text node out of the first
# job-description box via XPath.
rets = le.HTML(content2).xpath('//div[@class="tCompany_main"]/div[@class="tBorderTop_box"][1]//text()')
print(rets)

# 15. Strip leading/trailing whitespace (spaces, newlines) from each fragment.
ret_data = [
    ret.strip() for ret in rets
]
print(ret_data)

# 16. Join the already-stripped fragments into one text block, one fragment
# per line (no need to strip a second time).
ret_data = '\n'.join(ret_data)
print(ret_data)

