# -*- coding: utf-8 -*-

#scrapy import 
from scrapy.spiders import CrawlSpider
from scrapy.spiders import Rule
from scrapy.selector import Selector
from scrapy.http import Request
from scrapy.linkextractors import LinkExtractor

#base import 
import string, os, sys
from urlparse import urljoin
# import logging
# logger = logging.getLogger("scrapy")
# logger.setLevel(logging.WARN)
# print (logger.getEffectiveLevel())
# print (logger.getEffectiveLevel())
# print (logger.getEffectiveLevel())
# nullhandler = logging.NullHandler()
# logger.addHandler(nullhandler)

#config import 
from config.gl import DEBUG


class BlahSpider(CrawlSpider):
    """Crawl blah.me category pages and enumerate their pagination URLs."""
    name = "blah"
    allowed_domains = ["blah.me"]
    start_urls = (
        'http://blah.me/',
    )
    # Follow category links; each matched page is handled by parse_typeindex.
    rules = [
        Rule(LinkExtractor(allow=("http://blah.me/category/",)), callback="parse_typeindex")
    ]


    def parse_typeindex(self, response):
        """Read the pagination widget on a category page and print the
        per-page URLs (?p=1 .. ?p=<last page number>).

        :param response: the downloaded category page (scrapy Response).
        :returns: None -- this callback only prints; it yields no items
                  or follow-up requests (TODO: presumably the ?p= pages
                  should be requested; confirm intended behavior).

        Fixes vs. the original:
        - the last page number was extracted twice (once for the debug
          print, once for the int conversion); it is extracted once now;
        - an empty pagination list or a missing data-page attribute no
          longer raises IndexError -- the page is skipped instead.
        """
        sel = Selector(response)
        url = response.url

        # Pagination entries: <ul class="pagination ..."><li><a data-page="N">...
        pagelist = sel.xpath("//ul[@class='pagination ok-book-list-init']/li")
        if not pagelist:
            # No pagination widget on this page -- nothing to enumerate.
            return

        # The last <li> carries the highest page number in its data-page attr.
        pages = pagelist[-1].xpath("a/@data-page").extract()
        if not pages:
            # Last entry has no data-page attribute; bail out gracefully.
            return

        print ("==================>>>>>{a}".format(a=pages[0]))
        pagenum = int(pages[0])

        # Build the URL of every page: ?p=1 .. ?p=pagenum (inclusive).
        for pageidx in range(1, pagenum+1):
            url_page = urljoin(url, "?p="+str(pageidx))
            print (url_page, "---->>>>url_page")

