# -*- coding: utf-8 -*-
# TODO: crawling too fast gets the IP banned — set up a random proxy-IP pool and throttle the crawl rate
import scrapy,requests
from wangyi.items import WangyiItem
from wangyi.settings import count,ipPool


class JobSpider(scrapy.Spider):
    """Spider for the NetEase (163.com) HR job-listing pages.

    Crawls the paginated position list at hr.163.com, yields one item per
    job row, and follows each job's detail link to fill in the remaining
    fields before handing the item to the pipeline.
    """

    name = 'job'
    # Keep the crawl confined to the 163.com domain.
    allowed_domains = ['163.com']
    start_urls = ['https://hr.163.com/position/list.do']

    def start_requests(self):
        """Pre-fill the shared proxy-IP pool, then kick off the crawl.

        The IP list is obtained from a purchased Zhima proxy web API; each
        line of the plain-text response is one "host:port" entry.
        """
        # Fetch up to 50 proxy IPs before the first crawl request goes out.
        # timeout added so a dead proxy API cannot hang spider startup.
        ips = requests.get(
            'http://webapi.http.zhimacangku.com/getip?num=50&type=1&pro=&city=0&yys=0&port=1&time=1&ts=0&ys=0&cs=0&lb=1&sb=0&pb=4&mr=1&regions=',
            timeout=10,
        )
        for raw in ips.text.split('\r\n'):
            ip = raw.strip()
            # Skip blank lines (the API response ends with a trailing newline),
            # which previously produced useless "http://" entries in the pool.
            if ip:
                ipPool.append('http://' + ip)
        # BUG FIX: the original override never yielded anything, so Scrapy
        # received no initial requests and the spider never crawled.
        # Reproduce the default scrapy.Spider.start_requests behaviour here.
        for url in self.start_urls:
            yield scrapy.Request(url=url, callback=self.parse)

    def parse(self, response):
        """Extract job rows from a listing page and schedule detail requests."""
        # Each job occupies two <tr> rows in the table; only the even-indexed
        # row carries the data cells we need, hence the num % 2 filter.
        node_list = response.xpath("//*[@class='position-tb']//tbody/tr")
        for num, node in enumerate(node_list):
            if num % 2 == 0:
                item = WangyiItem()
                item["name"] = node.xpath("./td[1]/a/text()").extract_first()
                # response.urljoin() resolves the relative detail-page URL
                # against the current page URL.
                item["link"] = response.urljoin(node.xpath("./td[1]/a/@href").extract_first())
                item["depart"] = node.xpath("./td[2]/text()").extract_first()
                item["category"] = node.xpath("./td[3]/text()").extract_first()
                item["type"] = node.xpath("./td[4]/text()").extract_first()
                item["addres"] = node.xpath("./td[5]/text()").extract_first()
                # BUG FIX: extract_first() returns None when the cell is
                # missing; guard before calling strip() to avoid AttributeError.
                item["num"] = (node.xpath("./td[6]/text()").extract_first() or "").strip()
                item["date"] = node.xpath("./td[7]/text()").extract_first()
                yield scrapy.Request(
                    url=item["link"],
                    callback=self.parse_detail,
                    meta={"item": item},
                )

        # Pagination via the "next page" link: on the last page the site
        # renders it as a javascript:void(0) placeholder, which terminates
        # the crawl.  Also guard against the link being absent entirely.
        part_url = response.xpath("//div[@class='m-page']/a[last()]/@href").extract_first()
        if part_url and part_url != "javascript:void(0)":
            next_url = response.urljoin(part_url)
            yield scrapy.Request(
                url=next_url,
                callback=self.parse,
            )

    def parse_detail(self, response):
        """Fill in the remaining fields from a job's detail page and emit the item."""
        # The partially-filled item is handed over via request meta.
        item = response.meta["item"]
        # TODO(review): these XPath expressions were left blank in the
        # original code — an empty expression raises ValueError at runtime.
        # Fill in the real selectors for the duty / requirement sections
        # of the detail page.
        item["duty"] = response.xpath('').extract()
        item["require"] = response.xpath('').extract()
        yield item