﻿# -*- coding: utf-8 -*-
"""
__title__ = ''
__author__ = 'Yang'
__mtime__ = '2018/10/28'
# code is far away from bugs with the god animal protecting
    I love animals. They taste delicious.
              ┏┓      ┏┓
            ┏┛┻━━━┛┻┓
            ┃      ☃      ┃
            ┃  ┳┛  ┗┳  ┃
            ┃      ┻      ┃
            ┗━┓      ┏━┛
                ┃      ┗━━━┓
                ┃  神兽保佑    ┣┓
                ┃　永无BUG！   ┏┛
                ┗┓┓┏━┳┓┏┛
                  ┃┫┫  ┃┫┫
                  ┗┻┛  ┗┻┛
"""
import requests
import re
import time

# Zhaopin.com search-result URL template ({} = page number); NOT used below —
# apparently left over from an earlier version of this scraper.
url = "http://sou.zhaopin.com/jobs/searchresult.ashx?jl=%E9%80%89%E6%8B%A9%E5%9C%B0%E5%8C%BA&kw=erp&sm=0&sf=0&st=99999&el=5&isadv=1&sg=d1e6fa6df3f24eaa8eb9815232ffd61d&p={}"
# 51job.com search-result URL template for the keyword "大数据" (big data,
# URL-encoded in the path); {} is filled with the 1-based page number.
job = 'https://search.51job.com/list/000000,000000,0000,00,9,99,%25E5%25A4%25A7%25E6%2595%25B0%25E6%258D%25AE,2,{}.html?lang=c&stype=&postchannel=0000&workyear=99&cotype=99&degreefrom=99&jobterm=99&companysize=99&providesalary=99&lonlat=0%2C0&radius=-1&ord_field=0&confirmdate=9&fromType=&dibiaoid=0&address=&line=&specialarea=00&from=&welfare='


# Task: 1. collect job-requirement info for python positions (at least 300 records)
#       2. fields collected: a. salary  b. company name  c. company address  d. job description

# Example of the search-result anchor tag the regex below must match:
#   <a target="_blank" title="大数据开发工程师 244382" href="https://jobs.51job.com/wuhan/107014424.html?s=01&t=0" onmousedown="">
#                                     大数据开发工程师 244382                                </a>
def get_page_text(u):
    """Fetch URL *u* over HTTP and return the response body as text.

    The response is decoded using requests' ``apparent_encoding`` (charset
    sniffed from the body), since the server's declared header encoding is
    presumably unreliable for these pages — TODO confirm against the site.

    BUG FIX: ``requests.get`` has no default timeout, so a single stalled
    connection could hang the whole multi-page crawl forever; a 30-second
    timeout is now supplied (raises ``requests.Timeout`` on expiry, which
    the per-posting ``except`` in the caller already handles).
    """
    page = requests.get(u, timeout=30)
    page.encoding = page.apparent_encoding
    return page.text


# Crawl 51job search-result pages 1..1145 and write one record per posting to
# 51job.csv. Record format: (title, url)::salary::[company]::[address]::description
with open('c:/python_pc/51/51job.csv', 'w', encoding='utf8')as f_51job:
    # Hoisted out of the loops: matches each posting's <a> tag on a result
    # page, capturing (title, detail-page URL). '.' stands in for the single
    # spaces between attributes in the original page markup.
    link_re = re.compile('<a.target="_blank".title="(.*?)".href="(https.*?)".onmousedown=".*?">')
    # Tags/entities stripped from the raw job-description HTML (pattern kept
    # byte-identical to the original, including the literal tab alternatives).
    strip_re = re.compile(
        '<span>|</span>|&nbsp;|</p>|<p>|<br>|</li>|<li>|<ul>|</ul>|	|	|<b>|</b>|</div>|<div>|<P>|</P>')
    for p_index in range(1, 1146):
        print('page: ' + str(p_index))
        page_url = job.format(p_index)
        page_text = get_page_text(page_url)
        page_text_url_list = link_re.findall(page_text)
        if not page_text_url_list:
            # BUG FIX: open in append mode ('a'); mode 'w' overwrote the log
            # on every empty page, so only the last such URL survived.
            with open('c:/python_pc/51/page_not_have_date.csv', 'a', encoding='utf8') as page_not_have_date:
                page_not_have_date.write(page_url + "\n")
            continue
        for i in page_text_url_list:
            try:
                print('page: ' + str(p_index) + '; ' + 'index: ' + str(i))
                page_text1 = get_page_text(i[1])
                # Index [1]: the second <strong> on a detail page holds the
                # salary (IndexError here is caught below and logged).
                money = re.findall('<strong>(.*?)</strong>', page_text1)[1]
                # e.g. <a href="https://jobs.51job.com/all/co154736.html" target="_blank" title="华为技术有限公司" class="catn">
                company_name = re.findall('<a.href=".*?".target="_blank".title="(.*?)".class="catn">', page_text1)
                # e.g. <span class="label">上班地址：</span>高新西区西源大道 ... </p>
                company_address = re.findall('<span.class="label">上班地址：</span>(.*?)\t\t\t\t\t\t\t</p>', page_text1)
                job_desc = re.findall('<div.class="bmsg.job_msg.inbox">\r\n\t\t\t\t\t\t(.*)', page_text1)
                job_desc = strip_re.sub('', job_desc[0])
                # BUG FIX: terminate each record with "\n" — without it every
                # row of the output file ran together on one line.
                f_51job.write(
                    str(i) + "::" + str(money) + "::" + str(company_name) + "::" + str(
                        company_address) + "::" + job_desc + "\n")
            except Exception:
                # Narrowed from a bare except (which also swallowed
                # KeyboardInterrupt); log the failing posting and continue.
                with open("c:/python_pc/51/page.txt", "a", encoding='utf8') as f:
                    f.write(str(i) + ",程序异常中止,在" + page_url + "\n")

        # Throttle between result pages to avoid hammering the site.
        time.sleep(1)
