#!/usr/bin/env python3
# -*- coding=utf-8 -*-
# 2022.07
# 功能：用于获取智联网生物信息职位在全国范围内的所有招聘信息
#       将其保存在 '智联网_生信职位_' + 当前日期 的 xlsx 文件（本质是txt）下，附以表头和职位链接，方便筛选查看

from urllib import request
from urllib import error
import re
import sys
import time

# Fetch a page and return its decoded HTML.
def GetHTML(Url):
    """Download *Url* and return ``(html_text, final_url)``.

    ``final_url`` is the URL after any redirects (``response.geturl()``).
    On HTTPError/URLError or a non-200 status the error is printed and the
    process exits — callers never see an exception from here.
    """
    # Pretend to be a desktop browser so the site serves the normal page.
    # FIX: the original UA string contained stray space runs from a
    # line-wrapped copy/paste ("537    .36", "4620.    400"), which made the
    # header value malformed.
    head = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.104 Safari/537.36 Core/1.53.4620.400 QQBrowser/9.7.13014.400'}
    req = request.Request(Url, headers=head)
    # Handle network-level failures explicitly (HTTPError first: it is a
    # subclass of URLError and carries a status code).
    try:
        response = request.urlopen(req)
    except error.HTTPError as e1:
        print(e1.code)
        sys.exit()
    except error.URLError as e2:
        print(e2.reason)
        sys.exit()
    # FIX: close the connection when done instead of leaking it.
    with response:
        if response.getcode() == 200:
            return response.read().decode('utf-8'), response.geturl()
        print("Error:{}".format(response.getcode()))
        sys.exit()

# Extract the second-level (job-detail) links from a search-result page.
def GetSecondurl(HTML):
    """Return every job-detail URL (``http://jobs.zhaopin.com/....htm``)
    linked with ``target="_blank"`` in *HTML*, in document order.

    FIX: the dots in the original pattern were unescaped metacharacters, so
    hosts like ``jobsXzhaopinYcom`` would also have matched; they are now
    literal.
    """
    url_format = re.compile(r'href="(http://jobs\.zhaopin\.com/[^"]+?\.htm)" target="_blank">', re.S)
    return url_format.findall(HTML)

# Parse one job-detail page into a single tab-separated record string.
def GetInf(HTML):
    """Extract (job title, company, welfare, requirements, description) from
    a job-detail page and return them joined by tabs.

    Returns ``None`` (after printing the error) when any expected fragment is
    missing — a failed ``re.search`` returns ``None`` and the subsequent
    ``.group`` call raises ``AttributeError``, which is caught below.

    FIX: normalized the inconsistent indentation (the original mixed a
    3-space ``try:`` with an 8-space body); logic and patterns unchanged.
    """
    try:
        # Title and company sit in the "inner-left fl" header div.
        mc_back = re.search('<div class="inner-left fl".*?<h1>([^<]+?)</h1>.?.?<h2>.?.?<a [^>]+?>([^<]+?)<', HTML, re.S)
        zwmc = mc_back.group(1)  # job title
        gsmc = mc_back.group(2)  # company name
        # Welfare tags: every <span> inside the welfare-tab-box div.
        welfare = re.search('class="welfare-tab-box"(.+?)</div>', HTML, re.S).group(1)
        welfare = ' '.join(re.findall('<span>(.+?)</span>', str(welfare)))
        # Detailed requirements: label/value pairs pulled from the <li> items,
        # then flattened and stripped of entities, tags and noise text.
        jtxx = re.findall('<li>.?.?<span>(.+?)</span>.?.?<strong>(.+?)</a>', HTML, re.S)
        jtxx = re.sub('&nbsp;|<[^>]+?>', '', '\n'.join([' '.join(list(x)) for x in jtxx]))
        jtxx = re.sub('查看公司地图', '', jtxx)  # drop the "view company map" link text
        jtxx = re.sub(' +', ' ', jtxx)
        jtxx = re.sub('\r', '', jtxx)
        jtxx = re.sub('\n+', '  ', jtxx)  # keep the record on one physical line
        # Job description block; strip markup and boilerplate link texts.
        msxx = re.search('(职位描述.+?)</h2>', HTML, re.S).group(1)
        msxx = re.sub('<p>', '\n', msxx)
        msxx = re.sub('<[^>]+?>', '', msxx)
        msxx = re.sub('&nbsp;', ' ', msxx)
        msxx = re.sub('查看职位地图', '', msxx)
        msxx = re.sub('公司介绍', '', msxx)
        msxx = re.sub('举报收藏', '', msxx)
        msxx = re.sub('\t', '', msxx)  # tabs are the field separator — remove them
        msxx = re.sub('( ?\r?\n)+', '  ', msxx)
        # One record, tab-separated, matching the output file's data columns.
        return '\t'.join([zwmc, gsmc, welfare, jtxx, msxx])
    except AttributeError as Aerr:
        # One of the re.search calls found nothing; report and return None.
        print('AttributeError:{}'.format(Aerr))

# Main driver: crawl the search pages, then each job-detail page, and append
# one tab-separated record per job to the output file.
def main():
    """Scrape nation-wide bioinformatics job postings from zhaopin.com.

    The output file is named ``智联网_生信职位_<date>.xlsx`` but is really
    tab-separated text (one job per row, plus a header), meant to be opened
    with Excel.
    """
    # Nation-wide search for "生物信息" (URL-encoded); page number is appended.
    url_i = 'http://sou.zhaopin.com/jobs/searchresult.ashx?jl=%e9%80%89%e6%8b%a9%e5%9c%b0%e5%8c%ba&kw=%e7%94%9f%e7%89%a9%e4%bf%a1%e6%81%af&isadv=0&sg=d6bf37e89b3e4e21b8c0c8dc2d1b57e9&p='
    pageurl = [url_i + str(x) for x in range(10)]
    # Name the output file after today's date (unpadded, as before: e.g. 2022-7-1).
    localtime = time.localtime(time.time())
    times = str(localtime.tm_year) + '-' + str(localtime.tm_mon) + '-' + str(localtime.tm_mday)
    file_name = '智联网_生信职位_' + times + '.xlsx'
    # FIX: open with an explicit encoding instead of the platform default.
    # gbk is what the records are already filtered for (the
    # encode('gbk', 'ignore') round-trip below), i.e. Excel-friendly text on
    # Chinese Windows — TODO confirm gbk (vs utf-8) is the intended target.
    with open(file_name, 'a+', encoding='gbk', errors='ignore') as o:
        # FIX: the data columns from GetInf are welfare, requirements,
        # description (in that order), so the header now lists 职位要求 before
        # 职位描述 — the original header had these two labels swapped.
        o.write('序号\t职位名称\t公司名称\t福利待遇\t职位要求\t职位描述\t链接\n')
        for num, url in enumerate(pageurl):
            print('正在抓取第{}个网页（{}/{}）'.format(num+1, num+1, len(pageurl)))
            print(url)
            # Collect the detail-page links from this result page.
            second_urls = GetSecondurl(GetHTML(url)[0])
            for s, s_url in enumerate(second_urls):
                inf = GetInf(GetHTML(s_url)[0])
                try:
                    o.write(str(num + 1) + '_' + str(s + 1) + '\t')
                    # Round-trip through gbk to drop characters Excel/gbk
                    # cannot represent.
                    o.write(inf.encode('gbk', 'ignore').decode('gbk'))
                    o.write('\t' + s_url + '\n')
                except AttributeError:
                    # GetInf returned None (inf.encode raised); wait and retry once.
                    print(str(s + 1), ':AttributeError', s_url)
                    time.sleep(3)
                    try:
                        print('-'*10, 'Try Again !!!')
                        inf = GetInf(GetHTML(s_url)[0])
                        o.write(inf.encode('gbk', 'ignore').decode('gbk'))
                        o.write('\t' + s_url + '\n')
                    except AttributeError:
                        # Still no data: pad with empty fields so the row
                        # keeps its 7-column alignment (serial was written).
                        o.write('\t'*5 + s_url + '\n')

# Run the scraper only when executed as a script, not on import.
if __name__ == '__main__':
    main()
