import time
import scrapy
import datetime
from wyjob.items import WyjobItem
from DrissionPage import ChromiumPage


class JobSpider(scrapy.Spider):
    """Scrape job postings from NetEase HR (hr.163.com).

    Instead of parsing rendered HTML, this spider drives a real Chromium
    browser via DrissionPage and listens for the site's pagination XHR
    (``/api/hr163/position/queryPage``), reading each page of results
    directly from the intercepted JSON response.
    """

    name = "job"
    allowed_domains = ["163.com"]
    start_urls = ["https://hr.163.com/job-list.html"]

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Single browser instance reused for the whole crawl; 15s default
        # timeout for page/element operations.
        self.page = ChromiumPage(timeout=15)

    def parse(self, response):
        """Walk every result page, yielding one ``WyjobItem`` per job.

        Clicks the "next page" button until it is no longer found (last
        page), capturing the JSON payload of each page via the network
        listener started below.
        """
        # Start intercepting the pagination API before navigating.
        self.page.listen.start('/api/hr163/position/queryPage')
        try:
            self.page.get(url=response.url, retry=3, interval=2)
            while True:
                packet = self.page.listen.wait()
                # DrissionPage parses JSON responses; body is a dict here.
                json_data = packet.response.body
                for info in json_data['data']['list']:
                    # Fresh item per record. The original reused one mutable
                    # item instance for every yield, so pipelines/exporters
                    # holding references could see earlier records clobbered
                    # by later mutations.
                    item = WyjobItem()
                    item['id'] = info['id']
                    item['name'] = info['name']
                    item['productName'] = info['productName']
                    item['reqEducation'] = info['reqEducationName']
                    item['reqWorkYears'] = info['reqWorkYearsName']
                    item['description'] = info['description']
                    item['requirement'] = info['requirement']
                    item['recruitNum'] = info['recruitNum']
                    # Guard against a missing/empty workplace list instead of
                    # raising IndexError on a single bad record.
                    places = info.get('workPlaceNameList') or ['']
                    item['workPlaceName'] = places[0]
                    # updateTime arrives as epoch milliseconds; convert to a
                    # YYYY-MM-DD date string.
                    timestamp_seconds = info['updateTime'] / 1000
                    item['updateTime'] = datetime.datetime.fromtimestamp(
                        timestamp_seconds).strftime('%Y-%m-%d')
                    yield item
                # NOTE: time.sleep blocks Scrapy's reactor; tolerable here
                # because all fetching is done by the external browser, not
                # by Scrapy's downloader.
                time.sleep(3)
                # "Next page" button. The leading space inside the class
                # attribute is intentional — it matches the site's rendered
                # markup exactly; a disabled/absent button ends the loop.
                next_btn = self.page('x://div[@class="ant-spin-container"]//li[@class=" ant-pagination-next"]')
                if not next_btn:
                    break
                next_btn.click()
        except Exception as e:
            # Route failures through Scrapy's logging so they show up in
            # crawl logs/stats instead of being lost to stdout.
            self.logger.error("job spider failed: %s", e)
        finally:
            # Always release the browser, even on error.
            self.page.close()
