#! /usr/bin/env python
#coding=utf-8
import ast
import re
from itertools import chain, imap, izip

from redis import Redis

from scrapy.http import Request
from scrapy.selector import HtmlXPathSelector
from scrapy.contrib.loader import XPathItemLoader
from scrapy.contrib_exp.crawlspider import CrawlSpider, Rule
from scrapy.contrib_exp.crawlspider.reqext import SgmlRequestExtractor
from scrapy.contrib_exp.crawlspider.reqproc import Canonicalize, \
        FilterDupes, FilterUrl
from scrapy.utils.url import urljoin_rfc




class nuoqiu_spiders(CrawlSpider):
    TAG = 'NUOQIU'
    name = 'nuoqiu.com'
    allowed_domains = ['nuoqiu.com']
    start_urls = ['http://www.nuoqiu.com/']
    r = Redis()

    # extract requests using this classes from urls matching 'follow' flag
    request_extractors = [
        SgmlRequestExtractor(tags=['a'], attrs=['href']),
        ]

    # process requests using this classes from urls matching 'follow' flag
    request_processors = [
        Canonicalize(),
        FilterDupes(),
        FilterUrl(deny=(r'/\d+\.html$',r'/\d+/0/\d+\.html$',r'/\d+/list.html$',r'/\d+/\d+\.html$')), # deny movie url as we will dispatch
                                     # manually the movie requests
        ]
    

     # include domain bit for demo purposes
    rules = (
        # these two rules expects requests from start url
        Rule(r'nuoqiu.com/$', 'parse_top'),
        # this rule will parse requests manually dispatched
        Rule(r'nuoqiu.com/column/\d+\.html$', 'parse_story_url'),
        Rule(r'nuoqiu.com/column/\d+/0/\d+\.html$', 'parse_story_info'),
        Rule(r'nuoqiu.com/static/\d+/list.html$', 'parse_story_cat'),
        Rule(r'nuoqiu.com/static/\d+/\d+\.html$', 'parse_story_content'),
    )


    def parse_top(self, response):
        """Scrapes movies from top 250 list"""
        #self.log("Parsing Top")
        hxs = HtmlXPathSelector(response)

        # scrap each row in the table
        cat = {}
        rows = hxs.select('//div[@class="topdh"]/a')
        for row in rows:
            url, = row.select('@href').extract()
            url = self._urljoin(response, url)
			
			#cat_title, = row.select('text()').extract()
            cat_title, = row.select('text()').extract()
            key = re.search(r'(\d+)\.html$',url).group(1)
            cat[key] = cat_title
            #print url

            # fetch movie
            yield self.make_requests_from_url(url)
        self.r.set("NUOQIU_CAT_LIST",cat)

    def parse_story_url(self, response):
        """Scrapes movie information"""
        print response.url
        cat = re.search(r'(\d+)\.html$',response.url).group(1)
        hxs = HtmlXPathSelector(response)
        content, = hxs.select('//div[@class="gxshow"]/div[@class="pageviewp"]/li[1]/text()').extract()

        pagenum = re.search(r'^\d{1}/(\d+)',content.encode('utf-8')).group(1)

        story_url_list = []

        for i in range(1,int(pagenum)+1):
            url = self._urljoin(response, '/column/%d/0/%d.html' % (int(cat),i))
            story_url_list.append(url)
            yield self.make_requests_from_url(url)
        #print 'STORY:'+cat , story_url_list
        self.r.set(response.url,story_url_list)
        #print self.r.get('STORY:' + cat)
        



    def parse_story_info(self, response):
        hxs = HtmlXPathSelector(response)
        #urllist = hxs.select('//div[@class="gxshow"]/ul/li[1]/a/@href').extract()
        catnum = re.search(r'column/(\d+)',response.url).group(1)
        urllist = hxs.select('//div[@class="gxshow"]//ul')
        
        mdata = self.r.get(self.TAG + ':' + catnum)
        #print data == None
        if mdata == None:
            data = []
        else:
            data = eval(mdata)
        for sub in urllist:
            textattr = sub.select('li//text()').extract()
            hrefattr = sub.select('li/a/@href').extract()
            data.append(textattr + hrefattr)
            #data['href'] = hrefattr
            
            story_info_url = hrefattr[0].encode('utf-8')
            url = self._urljoin(response, story_info_url + 'list.html')
            yield self.make_requests_from_url(url)
        self.r.set(self.TAG + ':' + catnum,data)
        #print self.r.get(self.TAG + ':' + catnum)
        

    def parse_story_cat(self, response):
        #print response.url
        storynum = re.search(r'static/(\d+)',response.url).group(1)
        #storyurl = re.search(r'(static/\d+)',response.url).group(1)
        #print storyurl,storynum
        hxs = HtmlXPathSelector(response)
        rows = hxs.select('//div[@class="partlist"]/div[@class="top"]//following-sibling::li')
        
 
        jishu = []
        for row in rows:
           url = row.select('a/@href').extract()
           text = row.select('a/text()').extract()
           if not url or not text:continue
           jishu.append((url[0],text[0]))
            
           url = self._urljoin(response, url[0].encode('utf-8'))
           #print url
           yield self.make_requests_from_url(url)
           #print url,text
        #story['all'] = jishu
        self.r.set(self.TAG + ':' + storynum,jishu)
        
            
    def parse_story_content(self, response):
        contentnum = re.search(r'static/(\d+)/(\d+)\.html$',response.url).groups()
        key = ":".join(contentnum)
        
        
        #print key
        hxs = HtmlXPathSelector(response)
        rows = hxs.select('//div[@id="partbody"]//p')
        content = []
        for row in rows:
            c = row.select('text()').extract()
            if not c:continue
            content.append(c[0])
        self.r.set(self.TAG + ':' + key,content)
        len = self._savecompletecontent(':completed:content:' + contentnum[0],contentnum[1])
#        catlen = self.r.get(self.TAG + ':' + contentnum[0])
#        if len == catlen:
#            self._savecompletecontent(':completed:cat',contentnum[0])

            
        

    def _urljoin(self, response, url):
        """Helper to convert relative urls to absolute"""
        return urljoin_rfc(response.url, url, response.encoding)
    
    
    def _savecompletecontent(self,name,num):
        mcompletecontent = self.r.get(self.TAG + name)
        if mcompletecontent == None:
            completecontent = []
        else:
            completecontent = eval(mcompletecontent)
        completecontent.append(num)
        self.r.set(self.TAG + name,completecontent)
        return len(completecontent)
    
        

        
        
        
