# -*- coding: utf-8 -*-

from scrapy.selector import Selector
from scrapy.spider import Spider

from scrapy.utils.response import get_base_url
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor as sle
from scrapy.http import Request
from xs.items import *
import os
class DingdianSpider(CrawlSpider):
    """Scrape one novel from 23wx.com: the index page at ``start_urls``
    lists chapters; each chapter page yields an ``XsItem`` with its URL,
    1-based number, name, and content.
    """
    name = 'dingdianwenxue'
    allowed_domains = ['23wx.com']
    start_urls = [
        'http://www.23wx.com/html/50/50046/'
    ]

    def parse(self, response):
        """Parse the chapter index page.

        Yields one ``Request`` per chapter link; a partially-filled
        ``XsItem`` (chapterUrl + chapterNum) rides along in ``meta`` and
        is completed by :meth:`parse_1`.
        """
        # Function-scope import keeps the fix self-contained (Python 2 stdlib).
        from urlparse import urljoin

        self.log('Hi, this is an chapter page! %s' % response.url)

        hrefs = response.xpath('//*[@id="at"]/tr/td/a/@href').extract()
        self.log('found %d chapter links' % len(hrefs))
        # The last link is skipped -- presumably not a real chapter
        # (TODO confirm against the live page layout).
        for num, href in enumerate(hrefs[:-1], start=1):
            item = XsItem()
            item['chapterUrl'] = href
            item['chapterNum'] = num
            # urljoin handles absolute paths and already-absolute hrefs
            # correctly, unlike naive concatenation (response.url + href).
            url = urljoin(response.url, href)
            self.log('hi,it is %s' % url)
            yield Request(url, meta={'item': item}, callback=self.parse_1)

    def parse_1(self, response):
        """Parse a single chapter page.

        Completes the ``XsItem`` carried in ``response.meta`` with the
        chapter name and content. Returns the finished item, or ``None``
        (after logging) when the expected elements are missing, instead
        of crashing the crawl with ``IndexError``.
        """
        self.log('Hi, this is an item page! %s' % response.url)

        item = response.meta['item']
        names = response.xpath(
            '//div[@class="bdsub"]/dl/dd[1]/h1/text()').extract()
        contents = response.xpath('//*[@id="contents"]').extract()
        if not names or not contents:
            # Page layout changed or page loaded partially -- skip this
            # chapter cleanly rather than abort the whole crawl.
            self.log('missing chapter name/content at %s' % response.url)
            return None
        item['chapterName'] = names[0]
        item['chapterContent'] = contents[0]
        return item