# -*- coding:utf-8 -*-
# author: jingfeng
# time: 2018/11/18



import multiprocessing
import time
import requests
from lxml import etree
from fake_useragent import UserAgent
import re
import os

# Randomized User-Agent header, shared by every request this module makes.
# NOTE(review): ua.random is sampled once at import time, so all requests
# from one process reuse the same UA string — confirm this is intended.
ua = UserAgent()
headers ={'User-Agent': ua.random}


def get_page_number():
    """Return the total number of result pages (as a string of digits)
    for the 'python' keyword search on 51job.

    Fetches the first search-result page and pulls the page count out of
    the pager text contained in the first ``<span class="td">`` element.

    Raises:
        requests.RequestException: on network failure or timeout.
        IndexError / AttributeError: if the page layout changed and the
            pager span or its digits cannot be found.
    """
    url = 'https://search.51job.com/list/000000,000000,0000,00,9,99,python,2,1.html'
    # timeout prevents the whole crawl from hanging on a stalled connection
    response = requests.get(url, headers=headers, timeout=10)
    # 51job serves GBK-encoded pages; decode explicitly.
    response.encoding = 'gbk'
    html = response.text
    # First "td" span holds the pager text; extract its first run of digits.
    number = re.findall('<span class="td">(.*?)</span>', html)[0]
    page = re.search(r'(\d+)', number).group()

    return page



def get_info(pagelist):
    """Yield one job record per posting found on the given result pages.

    Each yielded item is a list of five strings:
    [job title, company name, work location, salary, publish date].

    Args:
        pagelist: iterable of 1-based page numbers to scrape.

    Yields:
        list[str]: one record per job posting.
    """
    for page_no in pagelist:
        url = 'https://search.51job.com/list/000000,000000,0000,00,9,99,python,2,{}.html'.format(str(page_no))
        print('爬取第{}页'.format(str(page_no)))

        # timeout prevents a single stalled page from hanging the worker
        response = requests.get(url, headers=headers, timeout=10)
        # 51job serves GBK-encoded pages
        response.encoding = 'gbk'
        html = response.text

        selector = etree.HTML(html)

        # Columns: job title / company / location / salary / publish date.
        jobs = selector.xpath('//p[contains(@class,"t1")]/span/a[@target="_blank"]/text()')
        company_names = selector.xpath('//span[@class="t2"]/a[@target="_blank"]/text()')
        # [1:] skips the first span — presumably the table header row; verify.
        adresses = selector.xpath('//span[@class="t3"]/text()')[1:]
        # Salary is pulled with a regex rather than xpath text() — presumably
        # because empty t4 spans would otherwise be dropped and misalign zip().
        moneys = re.findall(r'<span class="t4">(.*?)</span>', html)[1:]
        times = selector.xpath('//span[@class="t5"]/text()')[1:]

        # NOTE: the loop variable was previously named `time`, shadowing the
        # imported `time` module — renamed to `pub_time`.
        for job, company_name, adress, money, pub_time in zip(jobs, company_names, adresses, moneys, times):
            yield [job.strip(), company_name, adress, money, pub_time]



def get_info_list(pagelist, queue):
    """Scrape every page in *pagelist* and push each job record onto *queue*."""
    for record in get_info(pagelist):
        queue.put(record)



# NOTE(review): `start` appears to be unused anywhere in this file — confirm
# before removing.
start = True
# NOTE(review): this handle is opened at import time and never closed; under
# Windows multiprocessing (spawn start method) each child process re-imports
# the module and re-opens ('w' → truncates) the same file. Consider opening
# the file inside writedata() with a `with` block instead.
file = open(r'G:\python3code\爬虫实战\多进程\python_51job1.txt','w',encoding='utf-8')
def writedata(queue):
    """Drain *queue*, writing each record as one line to the output file.

    Runs until the queue stays empty for a full second, which is taken as
    the signal that every producer process has finished.

    Fixes over the previous version: the output file is opened locally and
    closed via `with` (it was a module-level handle that was never closed),
    and only `queue.Empty` ends the loop — real write/OS errors now
    propagate instead of being silently swallowed by `except Exception`.
    """
    from queue import Empty  # local import: the parameter shadows the `queue` module

    print('开始')
    with open(r'G:\python3code\爬虫实战\多进程\python_51job1.txt', 'w',
              encoding='utf-8') as out:
        while True:
            try:
                data = queue.get(block=True, timeout=1)
            except Empty:
                # No data arrived for 1s: assume the producers are done.
                break
            if data:
                print(data)
                out.write(str(data) + '\n')
            else:
                print(11111111111)

if __name__ == "__main__":
    page = get_page_number()

    queue = multiprocessing.Manager().Queue(1000000)
    mylist = [x for x in range(1,eval(page)+1)]

    djlist = [[], [], [], [], [], [], [], [], [], []]
    N = len(djlist)

    for i in range(len(mylist)):
        djlist[i % N].append(mylist[i])  # 取余寻找列表，均匀分配数据

    processlist = []


    for i in range(10):
        process = multiprocessing.Process(target=get_info_list, args=(djlist[i], queue))
        process.start()
        processlist.append(process)

    readdata = multiprocessing.Process(target=writedata, args=(queue,))
    processlist.append(readdata)
    readdata.start()



    for dj in processlist:
        dj.join()










