# coding=utf-8
from urlparse import urljoin
import simplejson

from scrapy.http import Request
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.selector import HtmlXPathSelector

from scan.items import ScanItem

class ScanSpider(CrawlSpider):
    """Crawl *.baidu.com, following subdomain links and extracting page
    titles and anchor hrefs into ScanItem objects."""
    name = 'Scan'
    allowed_domains = ["baidu.com"]
    start_urls = [
        "http://www.baidu.com",
    ]
    rules = (
        # URLs matching this pattern are followed for further links but
        # their content is not parsed (the URL here is a placeholder;
        # replace it for real use). Dots are escaped so '.' matches a
        # literal dot rather than any character, and the trailing comma
        # makes `allow` a real one-element tuple.
        Rule(SgmlLinkExtractor(allow=(r'http://(.*?)\.baidu\.com/',))),
        # URLs matching this pattern would have their content parsed via
        # parse_item (placeholder URL; replace for real use).
        #Rule(SgmlLinkExtractor(allow=(r'http://test_rul/test?product_id=\d+')), callback="parse_item"),
        )

    def parse_item(self, response):
        """Extract the page title and all anchor hrefs from a response.

        :param response: scrapy Response for a page matched by a rule
            with callback="parse_item".
        :returns: ScanItem with 'title' (list of <title> text nodes) and
            'link' (list of href attribute values).
        """
        hxs = HtmlXPathSelector(response)
        item = ScanItem()
        item['title'] = hxs.select('//title/text()').extract()
        # Select the href attributes directly instead of regex-matching
        # '"(.*)"' against the serialized <a> elements: that greedy group
        # captured from the first quote to the last quote of each element,
        # spanning multiple attributes and returning garbage.
        item['link'] = hxs.select('//a/@href').extract()
        return item
