#!/usr/bin/env python
import os, os.path
import re

from urlparse import urlparse
from urllib import splitvalue, urlencode

import json

from scrapy.conf import settings
from scrapy.http import Request, FormRequest
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.selector import HtmlXPathSelector

from douban.items import Subject

class DoubanSpider(CrawlSpider):
    """Crawl the logged-in douban.com user's book/movie/music collections.

    Flow: log in first (start_requests -> logged_in), then seed the "mine"
    listing page for every category/status pair.  The crawl rules follow
    listing pagination (de-duplicated in process_links) and subject detail
    pages (skipped in skip_subject when already dumped to disk).
    """

    domain_name = 'douban.com'

    rules = (
        # Category listing pages ("/book/mine" etc.); pagination links are
        # de-duplicated by process_links.
        Rule(SgmlLinkExtractor(allow=('/(book|movie|music)/mine')),
             process_links='process_links'),

        # Subject detail pages; skip_subject drops already-processed ones.
        Rule(SgmlLinkExtractor(allow=('/subject/\d+/$', )),
             callback='parse_subject',
             process_links='skip_subject'),
    )

    # Raw string with escaped dots: the previous pattern's bare '.' matched
    # any character, so hosts like 'wwwXdouban_com' would also have matched.
    RE_SUBJECT_URL = re.compile(r'http://www\.douban\.com/subject/(?P<id>\d+)/')

    def __init__(self):
        super(DoubanSpider, self).__init__()

        # Normalized listing paths already scheduled, for de-duplication
        # in process_links.
        self.urls = set()

    def start_requests(self):
        """Start the crawl by logging in with the configured credentials."""
        self.log("login to douban as <%s>" % settings['DOUBAN_USERNAME'])

        return [FormRequest('http://www.douban.com/login',
                            formdata={ 'form_email': settings['DOUBAN_USERNAME'],
                                       'form_password': settings['DOUBAN_PASSWORD'] },
                            callback=self.logged_in)]

    def logged_in(self, response):
        """Seed every category/status listing page after a successful login.

        A successful login lands on the douban.com front page; any other
        response (error page, captcha, ...) yields no requests at all.
        """
        if response.status == 200 and response.url == 'http://www.douban.com/':
            for url in [
                'http://www.douban.com/book/mine',
                'http://www.douban.com/movie/mine',
                'http://www.douban.com/music/mine',
            ]:
                for status in ['do', 'wish', 'collect']:
                    # self.parse is CrawlSpider's entry point; it applies
                    # the crawl rules to each listing page.
                    yield Request("%s?status=%s" % (url, status),
                                  callback=self.parse,
                                  meta = { 'referer_url': response.url })

    def skip_subject(self, links):
        """Filter out links to subjects whose raw data already exists on disk."""
        follows = []

        for link in links:
            m = self.RE_SUBJECT_URL.match(link.url)

            if m:
                subject_id = m.group('id')  # renamed: don't shadow builtin id()
                path = os.path.join(settings['DOUBAN_PATH'], 'subject', subject_id)

                if os.path.exists(path):
                    self.log("skip processed subject <%s>" % subject_id)
                    continue

            follows.append(link)

        return follows

    def process_links(self, links):
        """De-duplicate listing links.

        Two URLs count as duplicates when they agree on the path and on the
        'status' / 'start' (page offset) query parameters; every other query
        parameter is ignored for the comparison.
        """
        follows = []

        for link in links:
            o = urlparse(link.url)

            # Canonical path: collapse empty segments (leading/trailing '/').
            path = '/'.join([p for p in o.path.split('/') if p])

            params = [splitvalue(p) for p in o.query.split('&')] if o.query else []
            params = [(key, value) for (key, value) in params
                      if key in ['status', 'start']]
            params = dict(params)

            if params:
                path += '?' + urlencode(params)

            if path in self.urls:
                self.log("skip duplicate link <%s>" % link.url)
                continue

            self.urls.add(path)

            self.log("follow link <%s>" % link.url)

            follows.append(link)

        return follows

    def parse_subject(self, response):
        """Build a Subject item from a detail page.

        When DOUBAN_RAWDATA is set, also dump the raw body and response
        metadata under DOUBAN_PATH/subject/<id>/.  Returns None for URLs
        that do not match the subject pattern.
        """
        m = self.RE_SUBJECT_URL.match(response.url)

        if m is None:
            return None

        subject_id = int(m.group('id'))  # renamed: don't shadow builtin id()

        if settings['DOUBAN_RAWDATA']:
            subject_dir = os.path.join(settings['DOUBAN_PATH'],
                                       'subject', str(subject_id))

            if not os.path.exists(subject_dir):
                os.makedirs(subject_dir, mode=0o755)

            with open(os.path.join(subject_dir, 'index.html'), 'w') as f:
                f.write(response.body)

            metadata = {
                    'url': response.url,
                    'status': response.status,
                    # response.headers is a scrapy Headers mapping that
                    # json.dumps cannot serialize directly; convert to a
                    # plain dict of header name -> value list first.
                    'headers': dict(response.headers),
                }

            with open(os.path.join(subject_dir, 'metadata.log'), 'w') as f:
                f.write(json.dumps(metadata))

        return Subject(id=subject_id, url=response.url)
        
# Legacy Scrapy spider registration: old Scrapy versions discover spiders
# through a module-level SPIDER instance rather than by class.
SPIDER = DoubanSpider()