# -*- coding: utf-8 -*-
"""
Created on 2015/12/21 21:06
@File: haodf.py
@Author: Liangrong Li
@Reference: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
"""
from scrapy.spiders import Spider
from scrapy.http import Request
from wenzhen.items import HaodfItem


# Automatically crawls through multiple listing pages by following "next page" links.
class HaodfSpider(Spider):
    """Spider for article listings on zixun.haodf.com.

    Starting from one listing page, it extracts every article's title
    and relative URL, then follows the pagination bar's "next page"
    link so the crawl continues automatically.
    """
    name = "haodf"
    allowed_domains = ["haodf.com"]
    start_urls = (
        'http://zixun.haodf.com/dispatched/1.htm?p=100',
    )

    def parse(self, response):
        """Parse one listing page.

        Yields:
            HaodfItem: a single item per page whose ``title`` and
                ``url`` fields are parallel lists covering all
                articles on the page.
            Request: a follow-up request for the next listing page,
                parsed by this same callback.
        """
        item = HaodfItem()
        # Each listing row is an <li class="clearfix">; the article
        # link is the second <a> inside the row's first <span>.
        # .extract() already returns a plain list, so assign directly.
        item['title'] = response.xpath(
            '//li[@class="clearfix"]/span[1]/a[2]/text()').extract()
        item['url'] = response.xpath(
            '//li[@class="clearfix"]/span[1]/a[2]/@href').extract()
        yield item

        # The "next page" anchor sits third from the end of the
        # pagination bar; a fixed index like a[8] is unreliable because
        # the number of anchors in the bar varies between pages.
        rel_url = response.xpath(
            '//div[@class="p_bar"]/a[last()-2]/@href').extract()
        for rel in rel_url:
            # hrefs are site-relative, so prepend the host.
            yield Request('http://zixun.haodf.com' + rel,
                          callback=self.parse)