

from scrapy.selector import HtmlXPathSelector
from scrapy.spider import BaseSpider
from mycrawler.items import CrawlChapterUrl


class Thirdlevel(BaseSpider):
    """Spider that walks the jyeoo.com math-question chapter tree and emits
    one CrawlChapterUrl item per leaf chapter link.

    Each item carries:
      chapter -- concatenated chapter-path text (parent names + leaf name)
      url     -- absolute href of the leaf link (site root prepended)
      kind    -- constant 3 marking third-level chapter items (set in parse)
    """

    name = 'CrawlChapter'

    # Scrapy reads ``allowed_domains`` (the original attribute name
    # ``allowed_domin`` was silently ignored) and expects bare domain
    # names, not full URLs.
    allowed_domains = ['www.jyeoo.com']

    # Site root used to absolutize the relative hrefs found in the tree.
    base_url = 'http://www.jyeoo.com'

    start_urls = [
        'http://www.jyeoo.com/math/ques/search',
    ]

    def judge(self, hxs, name):
        """Recursively walk the chapter <ul>/<li> subtree rooted at *hxs*.

        ``name`` is the accumulated chapter-path prefix. Yields a
        CrawlChapterUrl item for every leaf <li> (one without a nested
        <ul>/<li>), recursing into non-leaf nodes.
        """
        for part in hxs.select('ul/li'):
            if part.select('ul/li').extract():
                # Non-leaf: recurse on the child node itself so 'ul/li' is
                # selected exactly once per level (the original recursed on
                # part.select('ul/li'), descending two levels at a time, and
                # called the bare name ``judge`` -- a NameError -- while
                # yielding the generator object instead of its items).
                child_prefix = '%s%s' % (name,
                                         part.select('a/text()').extract()[0])
                for item in self.judge(part, child_prefix):
                    yield item
            else:
                # .extract() was missing in the original, so these fields
                # received repr() of selector objects rather than the text.
                item = CrawlChapterUrl()
                item['chapter'] = '%s%s' % (name,
                                            part.select('a/text()').extract()[0])
                item['url'] = '%s%s' % (self.base_url,
                                        part.select('a/@href').extract()[0])
                yield item

    def parse(self, response):
        """Parse the chapter-tree page and return third-level chapter items."""
        hxs = HtmlXPathSelector(response)

        # Subject name shown at the top of the page; used as the path root.
        name = hxs.select(
            '//*[@id="page"]/div/table/tr[1]/td[1]/ul/li/a/text()').extract()[0]

        items = []
        for site in hxs.select('//*[@id="divTree"]/ul/li'):
            basechapter = '%s%s' % (name, site.select('a/text()').extract()[0])
            for part in site.select('ul/li'):
                if part.select('ul/li').extract():
                    # One more level of nesting: descend into the grandchildren.
                    nextchapter = '%s%s' % (
                        basechapter, part.select('a/text()').extract()[0])
                    for endpart in part.select('ul/li'):
                        items.append(self._make_item(nextchapter, endpart))
                else:
                    items.append(self._make_item(basechapter, part))
        # __sizeof__() reported the list's memory footprint in bytes; the
        # intended diagnostic is clearly the number of items collected.
        print('crawled %d chapter items' % len(items))
        return items

    def _make_item(self, prefix, node):
        """Build a kind-3 CrawlChapterUrl item from a leaf <li> node."""
        item = CrawlChapterUrl()
        item['chapter'] = '%s%s' % (prefix,
                                    node.select('a/text()').extract()[0])
        # Store an absolute URL string, consistent with judge()'s output
        # (the original stored the raw extract() *list* of relative hrefs).
        item['url'] = '%s%s' % (self.base_url,
                                node.select('a/@href').extract()[0])
        item['kind'] = 3
        return item
