from scrapy.selector import Selector
from scrapy.spiders import CrawlSpider,Rule
#from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
#from scrapy.linkextractors.sgml import SgmlLinkExtractor
from scrapy.linkextractors import LinkExtractor

from scrapy import Spider
from wikiSpider.items import Article

class ArticleSpider(Spider):
    """Spider that scrapes the main heading from Wikipedia article pages.

    For each start URL, fetches the page and returns one ``Article`` item
    whose ``title`` field holds the text of the page's first ``<h1>``
    element (or ``None`` if the page has no ``<h1>``).
    """
    # Spider identifier: run with `scrapy crawl article`.
    name = "article"
    # BUG FIX: Scrapy's OffsiteMiddleware reads `allowed_domains`, not
    # `allow_domains` — the original misspelling was silently ignored,
    # so offsite filtering never applied.
    allowed_domains = ['en.wikipedia.org']
    # Pages fetched when the crawl starts.
    start_urls = ["http://en.wikipedia.org/wiki/Main_Page",
                    "http://en.wikipedia.org/wiki/Python_%28programming_language%29",
                ]

    def parse(self, response):
        """Extract the page's first <h1> text into an Article item.

        :param response: the downloaded page (scrapy Response).
        :returns: an ``Article`` with ``title`` set, possibly to ``None``.
        """
        item = Article()
        # ROBUSTNESS FIX: extract_first() returns None for pages without an
        # <h1>, where the original `[0].extract()` raised IndexError.
        title = response.xpath("//h1/text()").extract_first()
        print("--------------- title is ::===", title, type(title))
        # The original try/except AttributeError here was dead code: item
        # field assignment raises KeyError (for undeclared fields), never
        # AttributeError, so the handler could not fire. Removed.
        item['title'] = title
        return item
'''
class ArticleSpider(CrawlSpider):
    name = "article2"
    allow_domains = ['en.wikipedia.org']
    #需要抓取的地址
    start_urls = ["http://en.wikipedia.org/wiki/Main_Page",
                    "http://en.wikipedia.org/wiki/Python_%28programming_language%29",
                ]
    rules = [Rule(LinkExtractor(allow=('(/wiki/)(?!:).*$')),callback='parse_item',follow=True)]

    def parse_item(self,response):
        item = Article()
        title = response.xpath("//h1/text()")[0].extract()
'''