# coding:utf-8
from urllib2 import Request

import scrapy
from scrapy.spiders import Rule
from scrapy.contrib.linkextractors import LinkExtractor

from crawler.items import ClassifyItem


class StandardListSpider(scrapy.Spider):
    """Disabled placeholder for the standards-list spider.

    The original implementation (a CrawlSpider-style setup with
    ``Rule``/``LinkExtractor`` pagination over
    http://www.spc.org.cn/gb168/basicsearch) was commented out and
    referenced an unimported ``StandardListItem``.  The class is kept
    as an empty stub so that any external references to it still
    resolve; it registers no spider name and performs no crawling.
    """
    pass

class ClassifySpider(scrapy.Spider):
    """Crawl the classification listing at spc.org.cn and emit ClassifyItem rows.

    For every ``<li>`` on a listing page an item is populated and a
    follow-up request to the detail page is scheduled, carrying the
    partially-filled item in the request meta.
    """
    # Spider name used by `scrapy crawl ClassifySpider`.
    name = "ClassifySpider"
    # Download delay (seconds) between requests, to be polite to the server.
    download_delay = 1
    # Must be bare host names: entries with a scheme ("http://...") never
    # match Scrapy's offsite filter and silently disable it.
    allowed_domains = ["www.spc.org.cn"]
    # Entry-point URL.
    start_urls = [
        "http://www.spc.org.cn/gb168/standardonline/"
    ]
    # Base URL of the per-classification detail page.
    detail_url = "http://www.spc.org.cn/gb168/standardonline/detail/"

    def parse(self, response):
        """Extract one ClassifyItem per list entry and follow to its detail page."""
        for sel in response.xpath('//ul/li'):
            item = ClassifyItem()
            # NOTE(review): both fields use the same XPath; 'code' probably
            # needs a different selector — confirm against the page markup.
            item['name'] = sel.xpath('span/text()').extract()
            item['code'] = sel.xpath('span/text()').extract()
            # Fixes two defects in the original:
            #  * `global detail_url` raised NameError — detail_url is a class
            #    attribute, so access it via self.
            #  * urllib2.Request is not schedulable by Scrapy — yield a
            #    scrapy.Request instead.
            # TODO(review): parse2 is not defined in this file — confirm the
            # callback exists elsewhere on this class.
            yield scrapy.Request(self.detail_url,
                                 meta={'item': item},
                                 callback=self.parse2)
