# -*- coding: utf-8 -*-
import scrapy
from lxml import etree
from scrapy.http import Request
from xn.items.items import XnItem,ChapterItem


class XncSpider(scrapy.Spider):
    """Spider for m.xncwxw2.com.

    Walks the paginated book listing, follows each book to its (possibly
    paginated) chapter index, and yields one ``XnItem`` per book carrying
    the accumulated ``chapter_list``.
    """

    name = 'xnc'
    allowed_domains = ['m.xncwxw2.com']
    start_urls = ['http://m.xncwxw2.com/xclass/0/1.html']
    base_url = 'http://m.xncwxw2.com'
    # Running counter: assigns a global sequence number to each book item.
    base_num = 0

    def parse(self, response):
        """Read the total page count from page 1, then request every listing page.

        The counter lives in ``<p class="page"><input value="current/total">``;
        the part after the final ``/`` is the total number of pages.
        """
        num = response.xpath("//p[@class='page']/input/@value").extract()[0].split("/")[-1]
        self.logger.info("共 %s 页", num)
        for i in range(1, int(num) + 1):
            # Page 1 is the start URL and was already fetched; without
            # dont_filter the dupefilter would drop this request and the
            # books on page 1 would never reach parse_title_list.
            yield Request(
                url='http://m.xncwxw2.com/xclass/0/%d.html' % i,
                callback=self.parse_title_list,
                dont_filter=(i == 1),
            )

    def parse_title_list(self, response):
        """Extract every book anchor on a listing page and follow it to the
        book's chapter index."""
        for a in response.xpath('//div[@id="main"]/div/a').extract():
            # Each extracted <a> fragment is re-parsed on its own so the
            # relative XPaths below apply to just this anchor.
            html = etree.HTML(a)
            item = XnItem()
            item['base'] = self.base_url
            item['title'] = html.xpath('//p/text()')[-1].strip()
            item['href'] = html.xpath('//a/@href')[-1]
            item['page'] = response.url
            self.base_num += 1
            item['num'] = self.base_num
            yield Request(url=self.base_url + item['href'],
                          meta={'item': item},
                          callback=self.parse_chapter_list)

    def parse_chapter_list(self, response):
        """Collect chapter name/href pairs for one book.

        Chapter indexes are paginated: the same item is carried through each
        "next page" request via ``meta`` and only yielded once the last page
        has been consumed.
        """
        item = response.meta['item']
        # First visit for this book: initialise the accumulator. Replaces
        # the original bare try/except, which silently swallowed any error.
        item.setdefault('chapter_list', [])

        for chapter in response.xpath('//div[@class="info_chapters"]/ul[2]//a').extract():
            html = etree.HTML(chapter)
            chapter_item = ChapterItem()
            chapter_item['name'] = html.xpath('//a/text()')[-1].strip()
            chapter_item['href'] = html.xpath('//a/@href')[-1]
            item['chapter_list'].append(chapter_item)

        # Next page of the chapter index, if any.
        next_link = response.xpath('//span[@class="right"]/a/@href').extract()
        if next_link:
            yield Request(url=self.base_url + next_link[0],
                          meta={'item': item},
                          callback=self.parse_chapter_list)
        else:
            yield item




