from scrapy.selector import Selector
from scrapy.spiders.crawl import CrawlSpider
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor
__author__ = 'wuliao'
import scrapy
import re
class DmozSpider(CrawlSpider):
    """Crawl www.111.com.cn, recording list-page and product-page URLs.

    Every followed list page has its URL appended to the file ``list``
    and every product page to the file ``product``, one URL per line
    (each write also adds a trailing blank line, matching the original
    ``url + '\\n'`` plus print behavior).
    """

    name = "jun"
    allowed_domains = ["111.com.cn"]
    start_urls = [
        "http://www.111.com.cn/list/968135-0-2_91529_129796,2_92825_120526,2_93205_132972,2_92707_133623-11-0-0-0-1.html"
    ]

    rules = [
        # List/pagination pages: keep following and log via parse_list.
        # NOTE: the allow pattern is a regex — dots are escaped so '.'
        # matches a literal dot, and the 1-tuple comma is explicit
        # (the original "(...)" was just a parenthesized string).
        Rule(LinkExtractor(allow=(r"http://www\.111\.com\.cn/list/",)),
             follow=True, callback='parse_list'),
        # Product detail pages: keep following and log via parse_item.
        Rule(LinkExtractor(allow=(r"http://www\.111\.com\.cn/product/",)),
             follow=True, callback='parse_item'),
    ]

    def parse_item(self, response):
        """Record a product page URL in the ``product`` file."""
        self._log_url('product', response.url)

    def parse_list(self, response):
        """Record a list page URL in the ``list`` file."""
        # The original also built a Selector and ran an unused xpath
        # ('//td[@class="title"]'); that dead work is removed.
        self._log_url('list', response.url)

    def _log_url(self, filename, url):
        """Append *url* plus a separating newline to *filename* and echo it.

        Shared by both callbacks so the append-and-print logic lives in
        one place. print(...) with a single argument is valid on both
        Python 2 and Python 3.
        """
        with open(filename, 'a') as f:
            f.write(url + '\n')
        print(url + '\n')