# -*- coding:utf-8 -*-

import random
import requests
import re
import json
from pyquery import PyQuery as pq
import time

# Browser request-header helpers
def get_ua():
    """Return a random User-Agent string drawn from a small browser pool."""
    user_agents = (
        'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 2.0.50727; SLCC2; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; InfoPath.3; .NET4.0C; Tablet PC 2.0; .NET4.0E)',
        'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; InfoPath.3)',
        'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:6.0) Gecko/20100101 Firefox/6.0',
        'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; InfoPath.3; .NET4.0C; .NET4.0E)',
        'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.101 Safari/537.36',
    )
    return random.choice(user_agents)

class JobSpider():
    """Crawler for 51job (前程无忧) keyword search results.

    Fetches search-result pages for a keyword, parses the JSON job list
    embedded in the page, attaches detail text to each posting, and
    accumulates everything in ``self.work_data``.
    """

    def __init__(self):
        # Search keywords; run() currently crawls only the first one.
        self.work_key = ['java', 'python', 'golang', 'php', '前端']
        # Job dicts collected across every page fetched.
        self.work_data = []

    def get_page(self, key_word, page=1):
        """Fetch one search-result page, then recurse to the next page.

        The job data is embedded in the HTML as a JSON blob assigned to
        ``window.__SEARCH_RESULT__``, so it is extracted with a regex and
        parsed with ``json`` rather than scraped from the rendered markup.

        :param key_word: search keyword injected into the URL path
        :param page: 1-based page number to fetch
        """
        # Build the search URL (the leading codes are fixed region filters).
        url = f'https://search.51job.com/list/240200%252c010000%252c020000%252c030200,000000,0000,00,9,99,{key_word},2,{page}.html'
        headers = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
            'Accept-Encoding': 'gzip, deflate, br',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'Cache-Control': 'max-age=0',
            'Connection': 'keep-alive',
            'User-Agent': get_ua(),  # rotate the UA on every request
        }
        time.sleep(0.2)  # small throttle between page requests
        page_text = requests.get(url=url, headers=headers).text
        # A genuine result page contains the site name; anything else is
        # treated as a block/captcha page.
        if '前程无忧' in page_text:
            # Raw string: '\.' in a plain string is an invalid escape
            # sequence (SyntaxWarning on modern Python).
            json_str = re.findall(r'window\.__SEARCH_RESULT__ = (.*?)</script>', page_text)[0]
            json_data = json.loads(json_str)
            # 'engine_jds' may be missing or null on odd pages; normalise
            # to [] so get_detail_page() never sees None.
            work_json_data = json_data.get('engine_jds') or []
            add_detail_data = self.get_detail_page(work_json_data)
            # get_detail_page() returns None for empty input, so use a
            # truthiness check (len(None) would raise TypeError).
            if add_detail_data:
                self.work_data.extend(add_detail_data)
            # Total page count reported by the site.
            total_page = int(json_data['total_page'])
            # The fixed offset deliberately stops the crawl after a handful
            # of pages instead of walking all ~680 result pages.
            if page <= (total_page - 679):
                print(f'正在读取第{page + 1}页内容...')
                self.get_page(key_word, page + 1)
            else:
                print('读取完毕!')
        else:
            print('读取异常!')

    def get_detail_page(self, parent_data):
        """Attach a 'detail_content_txt' field to each job dict.

        :param parent_data: list of job dicts parsed from the search page
        :return: the same list mutated in place, or ``None`` when the
            input is empty.

        NOTE(review): the real detail-page request is commented out and a
        canned HTML snippet is parsed instead, so the detail text is
        currently a placeholder.
        """
        time.sleep(1)
        if not parent_data:
            print('数据异常!')
            return None
        for data in parent_data:
            # URL and headers are kept ready for when the real request
            # below is re-enabled.
            url = data.get('job_href')
            headers = {
                'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
                'Accept-Encoding': 'gzip, deflate, br',
                'Accept-Language': 'zh-CN,zh;q=0.9',
                'Cache-Control': 'max-age=0',
                'Connection': 'keep-alive',
                'User-Agent': get_ua(),
            }
            # detail_page_ed = requests.get(url=url,headers=headers)
            detail_page_ed = '''
                    <div class="tBorderTop_box">
                        <div class="bmsg job_msg inbox">
                            啥都得会呀
                        </div>
                    </div>
                '''
            try:
                # A real Response has .content (bytes); the canned string
                # does not, and a live page might not decode as GBK either.
                detail_page = detail_page_ed.content.decode('gbk')
            except (AttributeError, UnicodeDecodeError):
                detail_page = detail_page_ed
            doc = pq(detail_page)
            detail_con = doc('div.tBorderTop_box div.bmsg.job_msg.inbox').text()
            data['detail_content_txt'] = detail_con if detail_con else 'IP已被检测'
        return parent_data

    # Assemble job dicts from scraped HTML rows.
    def format_data(self, data_list):
        """Build job dicts from a pyquery row set and append to work_data.

        NOTE(review): not called by the current JSON-based flow; kept for
        the HTML-scraping path.

        :param data_list: pyquery selection of job rows
        """
        for row_item in data_list.items():
            work_dict = {
                'title': row_item('strong span').text(),
                'price': row_item('i').text(),
                'add': row_item('em').text(),
                'needs': row_item('p').text(),
                'reward': [reward_item.text() for reward_item in row_item('.tabs span.fl').items()],
                'company_name': row_item('aside').text(),
                'detail_url': row_item.attr('href'),
            }
            self.work_data.append(work_dict)
            print(self.work_data)

    def run(self):
        """Entry point: crawl search results for the first keyword."""
        self.get_page(self.work_key[0])


if __name__ == '__main__':
    # Run the crawler, then report how many postings were collected.
    job_spider = JobSpider()
    job_spider.run()
    print(len(job_spider.work_data), job_spider.work_data)