# -*- coding: utf-8 -*-
import scrapy
from wangyi.items import WangySimpleiItem

class JobSimpleSpider(scrapy.Spider):
    """Spider that scrapes job postings from the NetEase (163.com) job board,
    yielding one :class:`WangySimpleiItem` per posting and following the
    pager's "next" link until the last page is reached.
    """
    name = 'job_simple'
    allowed_domains = ['163.com']
    start_urls = ['http://163.com/']

    def parse(self, response):
        """Parse one listing page.

        Yields:
            WangySimpleiItem: one item per job row on the page.
            scrapy.Request: a request for the next listing page, while one exists.
        """
        # Grab every <tr> of the positions table; each job occupies two rows,
        # and only the even-indexed row carries the data cells.
        node_list = response.xpath("//*[@class='position-tb']//tbody/tr")
        for row_index, node in enumerate(node_list):
            if row_index % 2 == 0:
                # BUG FIX: the original bound the Item *class* (missing
                # parentheses), so item["name"] = ... raised TypeError.
                # Instantiate the item instead.
                item = WangySimpleiItem()
                item["name"] = node.xpath("./td[1]/a/text()").extract_first()
                # response.urljoin() resolves a relative href against the
                # current page URL (auto-completes the absolute link).
                item["link"] = response.urljoin(node.xpath("./td[1]/a/@href").extract_first())
                item["depart"] = node.xpath("./td[2]/text()").extract_first()
                item["category"] = node.xpath("./td[3]/text()").extract_first()
                item["type"] = node.xpath("./td[4]/text()").extract_first()
                item["addres"] = node.xpath("./td[5]/text()").extract_first()
                # extract_first() returns None for a missing cell; guard before
                # .strip() to avoid AttributeError on malformed rows.
                num_text = node.xpath("./td[6]/text()").extract_first()
                item["num"] = num_text.strip() if num_text is not None else None
                item["date"] = node.xpath("./td[7]/text()").extract_first()
                yield item

        # Pagination: the pager's last <a> is the "next page" link. On the
        # final page its href is "javascript:void(0)"; it may also be absent
        # entirely (None), so guard both before building the follow-up request.
        part_url = response.xpath("/html/body/div[2]/div[2]/div[2]/div/a[last()]/@href").extract_first()
        if part_url and part_url != "javascript:void(0)":
            yield scrapy.Request(
                url=response.urljoin(part_url),
                callback=self.parse,
            )

