
from modules import requests, pq, get_ua
import time
class JobSpider():
    """Crawler for 51job's mobile search results.

    For a given keyword it fetches the listing pages, follows the "next
    page" links, and accumulates one dict per job posting in
    ``self.work_data``.
    """

    def __init__(self):
        # Mobile-site search endpoint; the keyword is appended in get_page().
        self.base_url = 'https://msearch.51job.com/job_list.php?keyword='
        # Keywords available for searching ('前端' = "front-end").
        self.work_key = ['java', 'python', 'golang', 'php', '前端']
        # Accumulated job dicts across all crawled pages.
        self.work_data = []

    def get_page(self, base_url, key_word, key_pre=''):
        """Fetch one result page, parse its rows, then follow pagination.

        Args:
            base_url: the search URL (or, on recursive calls, the
                protocol-relative href of the next page).
            key_word: keyword appended to the URL; '' on recursive calls
                because the next-page href already embeds it.
            key_pre: scheme prefix ('https:') for protocol-relative hrefs.
        """
        # Build the final URL: prefix + base + keyword.
        url = key_pre + base_url + key_word
        headers = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
            'Accept-Encoding': 'gzip, deflate, br',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'Cache-Control': 'max-age=0',
            'Connection': 'keep-alive',
            'User-Agent': get_ua(),  # rotate UA to look less like a bot
        }
        response = requests.get(url=url, headers=headers)
        page = response.content.decode('UTF-8')
        # Parse the listing rows out of the page.
        doc = pq(page)
        row_list = doc('#pageContent div.list a')
        self.format_data(row_list)
        # Follow the "next page" link if one exists.
        next_page_url = doc('.paging a.next').attr('href')
        # BUGFIX: attr() returns None on the last page; the original code
        # did `key_word in None` and crashed with a TypeError there.
        if next_page_url and key_word in next_page_url:
            # next-page hrefs are protocol-relative, so prepend 'https:'.
            self.get_page(next_page_url, '', 'https:')

    @staticmethod
    def get_detail_page(detail_url):
        """Fetch a job-detail page and return its decoded HTML.

        BUGFIX: the original definition omitted ``self``, so calling it on
        an instance raised a TypeError. Declared as a staticmethod, which
        fixes instance calls while keeping the existing
        ``JobSpider.get_detail_page(url)`` call style working.
        """
        headers = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
            'Accept-Encoding': 'gzip, deflate, br',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'Cache-Control': 'max-age=0',
            'Connection': 'keep-alive',
            'User-Agent': get_ua(),
        }
        response = requests.get(url=detail_url, headers=headers)
        return response.content.decode('UTF-8')

    def format_data(self, data_list):
        """Extract the fields of each listing row and append one dict per
        row to ``self.work_data``.

        Args:
            data_list: pyquery selection of the row anchors
                ('#pageContent div.list a').
        """
        for row_item in data_list.items():
            work_dict = {
                'title': row_item('strong span').text(),
                'price': row_item('i').text(),
                'add': row_item('em').text(),          # location
                'needs': row_item('p').text(),          # requirements line
                # Benefit tags shown on the card (e.g. insurance, bonus).
                'reward': [tag.text() for tag in row_item('.tabs span.fl').items()],
                'company_name': row_item('aside').text(),
                'detail_url': row_item.attr('href'),
            }
            self.work_data.append(work_dict)
            # BUGFIX: the original printed the WHOLE accumulated list after
            # every append (quadratic debug output); print only the new row.
            print(work_dict)

    def run(self):
        """Entry point: crawl every result page for the first keyword,
        filling ``self.work_data`` as a side effect."""
        self.get_page(self.base_url, self.work_key[0])


if __name__ == '__main__':
    # Script entry point: crawl and accumulate into spider.work_data.
    # BUGFIX: run() returns None, so binding its result to `df` was
    # misleading; call it for its side effects instead.
    spider = JobSpider()
    spider.run()