'''
Created on Nov 3, 2009

@author: wangjo
'''

import re
import urlparse
import logging
from google.appengine import runtime
from google.appengine.runtime import apiproxy_errors
from google.appengine.ext import db
from html5lib import html5parser
from html5lib import treebuilders
from google.appengine.api import urlfetch
import mokomodel
import urllib

# Spoofed Baiduspider User-Agent for crawling — presumably the site serves
# full pages to search-engine bots; used by the spider and model fetchers.
headers = {'User-Agent': 'Baiduspider', 'Referer': 'http://www.moko.cc',}
# Regular browser User-Agent, used for gallery/album/photo fetches where a
# browser-like request is needed.
pheader = {'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US; rv:1.9.0.12) Gecko/2009070611 Firefox/3.5.12', 'Referer': 'http://www.moko.cc',}
class spider(object):
    """Breadth-first crawler that discovers moko.cc model homepages.

    Starting from a base url, pages are fetched and every link matching a
    model homepage (http://www.moko.cc/<username>) is queued for crawling.
    Discovered models are reported through the logger passed to crawler().
    """

    # (tag, attribute) pairs in which HTML documents may carry URLs.
    _url_attributes = [
    ('a', 'href'),
    ('applet', 'codebase'),
    ('area', 'href'),
    ('blockquote', 'cite'),
    ('body', 'background'),
    ('del', 'cite'),
    ('form', 'action'),
    ('frame', 'longdesc'),
    ('frame', 'src'),
    ('iframe', 'longdesc'),
    ('iframe', 'src'),
    ('head', 'profile'),
    ('img', 'longdesc'),
    ('img', 'src'),
    ('img', 'usemap'),
    ('input', 'src'),
    ('input', 'usemap'),
    ('ins', 'cite'),
    ('link', 'href'),
    ('object', 'classid'),
    ('object', 'codebase'),
    ('object', 'data'),
    ('object', 'usemap'),
    ('q', 'cite'),
    ('script', 'src')]

    # Matches a model homepage url; the second capture group is the username.
    # (raw string now; the redundant {1} quantifier was dropped)
    _re_pattern = r'(^http://www.moko.cc)\/(\w+)(/)?$'

    # Maximum number of additional pages to follow beyond the start page.
    MAX_COUNT = 10

    def __init__(self):
        # Crawl state is per-instance now.  These used to be shared class
        # attributes, so state leaked between spider instances.
        self.viewedQueue = []   # every url already seen (avoid re-queueing)
        self.instQueue = []     # urls waiting to be crawled, FIFO
        self._models = {}       # username -> homepage url
        self._count = 0         # pages crawled so far

    def crawler(self, base_url, logger, simplecrawler):
        """Crawl starting at base_url; report found models via logger.add_model.

        simplecrawler -- when true, only the start page is processed and no
        queued links are followed.
        """
        link = base_url
        while link != '':
            dom = self._get_dom(link)
            if dom:
                self._parse_dom(dom, link, base_url)
            if simplecrawler:
                link = ''
            else:
                link = self._get_next_link()
        self._dump(logger)

    def _get_next_link(self):
        """Pop the next queued url, or '' when exhausted or MAX_COUNT hit."""
        if not self.instQueue or self._count == self.MAX_COUNT:
            return ''
        self._count += 1
        return self.instQueue.pop(0)

    def _get_dom(self, url):
        """Fetch url and return its parsed DOM, or None on failure."""
        try:
            fetch = urlfetch.fetch(url, headers=headers)
            if fetch.status_code == 200:
                p = html5parser.HTMLParser(tree = treebuilders.getTreeBuilder('dom'))
                return p.parse(fetch.content)
        except urlfetch.Error, e:
            logging.error('URL fetch has met exception: %s' % e.message)
        return None

    def _parse_dom(self, dom, self_url, base_url):
        """Harvest model-homepage links from dom; if the page itself is a
        model homepage, parse its profile data too."""
        match = re.match(self._re_pattern, self_url)
        if match:
            modelname = match.groups(1)[1]
            parse_model_data(modelname, dom, True)

        # honour <base href="..."> when resolving relative links
        heads = dom.getElementsByTagName('head')
        if heads:  # guard: indexing [0] raised IndexError on head-less docs
            for b in heads[0].getElementsByTagName('base'):
                u = b.getAttribute('href')
                if u:
                    base_url = urlparse.urljoin(base_url, u)
                    break
        for tag, attr in self._url_attributes:
            for e in dom.getElementsByTagName(tag):
                u = e.getAttribute(attr)
                if u:
                    url = urlparse.urljoin(base_url, u)
                    m = re.match(self._re_pattern, url)
                    if m:
                        if url not in self.viewedQueue:
                            self.instQueue.append(url)
                            self.viewedQueue.append(url)
                        name = m.groups(1)[1]
                        if name not in self._models:
                            self._models[name] = url

    def _dump(self, logger):
        """Report every discovered model (username, url) to the logger."""
        for k, v in self._models.iteritems():
            logger.add_model(k, v)

def parse_model_data(username, dom, getphoto=False, fetchavatar = False):
    """Extract a model's profile data from her homepage DOM and persist it.

    Looks up (or creates) the mokomodel.model entity for username, fills
    name/gender/category/avatar/status/address/age/visitcount from known
    page sections, and stores it.  Returns the entity, or None when either
    argument is missing or the profile section ('topman' div) is absent.

    username    -- moko.cc user name (also used to build the homepage url)
    dom         -- parsed DOM of the model's homepage
    getphoto    -- when True, also collect homepage photos (top girls only)
    fetchavatar -- when True, download and store the avatar image
    """
    if not username: return 
    if not dom: return
    topgirl = False
    # reuse the stored entity if this model has been crawled before
    m = db.Query(mokomodel.model).filter('username =', username).get()
    if not m:
        m = mokomodel.model(username = username, homepage='http://www.moko.cc/%s'%username)
    # locate the three page sections we read from, by div id
    node_profile = None
    node_info = None
    node_status = None
    divs = dom.getElementsByTagName('div')
    for d in divs:
        did = d.getAttribute('id')
        if did: 
            did = did.lower()
        else:
            continue
        if did == 'topman':
            # NOTE(review): 'topman' appears to be the profile header for
            # both genders -- confirm against the live page markup
            node_profile = d
        elif did == 'aaa':
            node_info = d
        elif did == 'userlasttangle':
            node_status = d
        if node_profile and node_info and node_status: 
            break
        
    # determine if the model is top girl or top man
    # NOTE(review): everything below (status, info, save) is nested inside
    # this check, so without a profile div nothing at all is persisted.
    if node_profile:
        links = node_profile.getElementsByTagName('a')
        for a in links:
            c = a.getAttribute('class')
            if c.find('name')>= 0:
                m.name = getText(a.childNodes)
            elif c.find('moko') >=0:
                lt = getText(a.childNodes)
                if lt.lower().find('moko top girl')>=0:
                    topgirl = True
                elif lt.lower().find('moko top man') >=0:
                    m.gender = True
                
        # profile <li> anchors carry the model's category labels
        ls = node_profile.getElementsByTagName('li')
        categories = []
        for li in ls:
            a = li.getElementsByTagName('a')[0]
            c = getText(a.childNodes)
            categories.append(c)
        m.category = categories
        
        #parse the avatar photo
        imgs = dom.getElementsByTagName('img')
        for img in imgs:
            if img.getAttribute('id') == 'idPicture' or img.getAttribute('class').lower().find('usericon') >=0:
                m.avatar = db.Link(img.getAttribute('src'))
        
        #parse latest status:
        if node_status:
            s = getText(node_status.childNodes)
            m.status = s
        
        #parse address, age and visit count
        if node_info:
            ps = node_info.getElementsByTagName('p')
            for p in ps:
                cl = p.getAttribute('class')
                if cl:
                    cl = cl.lower()
                else:
                    continue
                if cl.find('address') >= 0:
                    m.address = getText(p.childNodes)
                elif cl.find('age') >= 0:
                    m.age = getText(p.childNodes)
                elif cl.find('visit') >=0:
                    # visit count is rendered as "label:12345"
                    v = getText(p.childNodes)
                    splits = v.split(':')
                    if len(splits)> 1:
                        m.visitcount = int(splits[1])
        
        #if top girl, set gender as False
        if topgirl:
            # add_topgirl persists and returns the (possibly replaced) entity
            m = mokomodel.topgirl.add_topgirl(m.username, m)
        else:
            if not m.gender:
                m.gender = None
            m.put()
        if getphoto and topgirl:    #parse the homepage photos
            # NOTE(review): photos are only collected for top girls --
            # confirm this restriction is intended
            get_model_photo(m, dom)
        #fetch avatar photo
        if fetchavatar:
            fetch_avatar(m)
        return m
    
def getText(nodelist):
    """Concatenate the text of every TEXT_NODE in nodelist, stripped of
    surrounding whitespace."""
    pieces = [n.data for n in nodelist if n.nodeType == n.TEXT_NODE]
    return ''.join(pieces).strip()

def get_model_photo(model, dom):
    """Collect homepage photo urls from the 'picsmodle' div and store each
    one as a mokomodel.photo for this model.

    Handles two layouts: a flash viewer ('dynamicImageShow' div, whose photo
    list lives in an external data.xml) and plain <img> thumbnails inside
    'borderon'-classed divs.
    """
    for container in dom.getElementsByTagName('div'):
        if container.getAttribute('id') != 'picsmodle':
            continue
        photolist = []
        for child in container.getElementsByTagName('div'):
            if child.getAttribute('id') == 'dynamicImageShow':
                # flash viewer: the first <input> of the container carries
                # the viewer path from which the data.xml url is derived
                flashvar = container.getElementsByTagName('input')[0].getAttribute('value')
                photolist.extend(parse_flash_xml_data(get_flash_photo_url(flashvar)))
                break
            elif child.getAttribute('class').lower().find('borderon') >= 0:
                for img in child.getElementsByTagName('img'):
                    photolist.append(img.getAttribute('src'))
        for purl in photolist:
            mokomodel.photo.add_photo(url = purl, model=model)
        break
    

def update_model(username):
    if not username: return None
    m = db.Query(mokomodel.model).filter('username =', username).get()
    if m:
        url = m.homepage
    else:
        url = 'http://www.moko.cc/%s'%username
    fetch = urlfetch.fetch(url, headers=headers)
    if fetch.status_code == 200:
        p = html5parser.HTMLParser(tree = treebuilders.getTreeBuilder('dom'))
        dom = p.parse(fetch.content)
        m = parse_model_data(username, dom, getphoto=True, fetchavatar = True)
        return m
    
def get_flash_photo_url(var):
    """Return the url of the flash viewer's data.xml for the given viewer
    path fragment (e.g. '/showpic/123/')."""
    template = u'http://www.moko.cc%sdata.xml'
    return template % var

def parse_flash_xml_data(url):
    """Fetch the flash viewer's data.xml and return the absolute urls of
    every <img> it references.

    Always returns a list (possibly empty).  The previous version returned
    None on fetch errors, which crashed callers doing list.extend().
    """
    result = []
    try:
        fetch = urlfetch.fetch(url, headers=headers)
        if fetch.status_code == 200:
            p = html5parser.HTMLParser(tree = treebuilders.getTreeBuilder('dom'))
            dom = p.parse(fetch.content)
            imgs = dom.getElementsByTagName('img')
            for img in imgs:
                result.append( urlparse.urljoin('http://www.moko.cc/',img.getAttribute('src')))
    except urlfetch.Error:
        logging.error('urlfetch flash xml data has met error, url is:%s'%url)
    # fix: return the accumulated (or empty) list on every path
    return result
        
def parse_gallery(url_gallery, model):
    """Crawl a model's gallery index pages and register every album found.

    Follows pagination (the 'down l' anchor) until no next page is present.
    On the first visit the model's total album count is parsed and stored.
    Returns True when at least one album was added.
    """
    result = False
    if not url_gallery: return result
    while url_gallery != '':
        try:
            logging.debug('fetch model gallery:%s'%url_gallery)
            fetch = urlfetch.fetch(url_gallery, headers = pheader)
            url_gallery = ''
            # counters on the page are rendered like "(123)"
            _re_galery_pattern = r'\((\d+)\)'
            if fetch.status_code == 200:
                p = html5parser.HTMLParser(tree = treebuilders.getTreeBuilder('dom'))
                dom = p.parse(fetch.content)
                lis = dom.getElementsByTagName('li')
                divs = dom.getElementsByTagName('div')

                # first time only: parse the model's total album count from
                # the "post" tab label
                if not model.count_album_total:
                    nums = None
                    for l in lis:
                        id = l.getAttribute('id')
                        if id and id.lower() == 'post':
                            spans = l.getElementsByTagName('span')
                            for s in spans:
                                if s.getAttribute('class').find('font12') >= 0:
                                    nums = getText(s.childNodes)
                                    break
                    if nums:
                        m = re.match(_re_galery_pattern, nums)
                        if m:
                            val = m.groups(0)[0]
                            if val:
                                model.count_album_total = int(val)
                                model.put()
                for d in divs:
                    style = d.getAttribute('class')
                    if style and style.lower() == 'show l':
                        # one album teaser.  Defaults guard against missing
                        # elements -- these were previously unbound locals,
                        # raising NameError at the add_album call below.
                        albumurl = None
                        albumdate = None
                        cover = None
                        count_comment = count_visit = count_photo = None
                        # album title
                        title = d.getAttribute('title')
                        # album date
                        h6s = d.getElementsByTagName('h6')
                        if h6s:
                            albumdate = getText(h6s[0].childNodes)
                        # album link, cover and counters
                        links = d.getElementsByTagName('a')
                        imgs = d.getElementsByTagName('img')
                        if imgs:  # fix: [0] raised IndexError when no <img>
                            cover = imgs[0].getAttribute('src')
                        for al in links:
                            sty = al.getAttribute('class')
                            if not sty:
                                continue
                            sty = sty.lower()
                            if sty.find('coverbg') >=0 :
                                ur = al.getAttribute('href')
                                if ur.find('javascript:void') < 0:
                                    albumurl = urlparse.urljoin('http://www.moko.cc/', ur)
                            elif sty.find('reply') >=0 or sty.find('point') >=0 or sty.find('pic') >=0:
                                cm = re.match(_re_galery_pattern, getText(al.childNodes))
                                if not cm:  # fix: match() may return None
                                    continue
                                val = cm.groups(0)[0]
                                if sty.find('reply') >=0:
                                    count_comment = val
                                elif sty.find('point') >=0:
                                    count_visit = val
                                else:
                                    count_photo = val
                        if albumurl:
                            a = mokomodel.album.add_album(model, albumurl, title, count_photo, count_visit, count_comment, albumdate, db.Link(cover))
                            if a:
                                logging.debug('add album:"%s" successful'%a.title)
                                result = True
                    if style and style.lower().find('pagination') >=0:
                        # pager: the 'down l' anchor points at the next page
                        al = d.getElementsByTagName('a')
                        for a in al:
                            cl = a.getAttribute('class')
                            if cl and cl.lower().find('down l')>=0:
                                url_gallery = urlparse.urljoin('http://www.moko.cc', a.getAttribute('href'))
        except urlfetch.Error, e:
            logging.error('fetch url "%(url)s" has met error: %(msg)s'%{'url': url_gallery, 'msg': e.message})
            url_gallery = ''

    return result

def fetch_gallery(model):
    """Crawl the model's gallery index (e.g.
    http://www.moko.cc/post/<username>/indexpost.html) and register albums.
    Returns True when at least one album was added."""
    gallery_index = u'http://www.moko.cc/post/%s/indexpost.html' % model.username
    return parse_gallery(gallery_index, model)
    
def fetch_album(album):
    """Fetch one album page and store every photo found on it.

    Handles the full-size photo list ('pic dashedon' divs with <img>/<pre>)
    and the thumbnail grid ('piclist dashedon' divs, where each photo url is
    held in an <input> whose id contains 'seePhoto').  Returns True when an
    'article' section was processed, False on error or bad input.
    """
    if not album: return False
    if not album.url:
        logging.error('the specified album has no url')
        return False
    result = False
    try:
        logging.debug('fetch album:%s'%album.url)
        fetch = urlfetch.fetch(album.url, headers = pheader)
        if fetch.status_code == 200:
            p = html5parser.HTMLParser(tree = treebuilders.getTreeBuilder('dom'))
            dom = p.parse(fetch.content)
            divs = dom.getElementsByTagName('div')
            for div in divs:
                sty = div.getAttribute('class')
                if sty and sty.lower() == 'article':
                    divpic = div.getElementsByTagName('div')
                    for d in divpic:
                        sc = d.getAttribute('class')
                        if not sc:
                            continue
                        sc = sc.lower()
                        if sc =='pic dashedon':
                            imgurl = None
                            comment = None  # fix: was unbound when no <pre>
                            imgs = d.getElementsByTagName('img')
                            if imgs:
                                imgurl = imgs[0].getAttribute('src')
                            pres = d.getElementsByTagName('pre')
                            if pres:
                                comment = getText(pres[0].childNodes)
                            if imgurl:
                                logging.debug('found list photo:%s'%imgurl)
                                mokomodel.photo.add_photo(imgurl, album.model, album, comment = comment)
                        if sc == 'piclist dashedon':
                            # fix: was getElementsByTagName('inputs') -- not
                            # an HTML tag, so thumbnails were never found
                            inputs = d.getElementsByTagName('input')
                            for input in inputs:
                                id = input.getAttribute('id')
                                # fix: was "if id.find('seePhoto'):" which is
                                # truthy for -1 (absent) and falsy for 0
                                # (present at start) -- inverted match
                                if id.find('seePhoto') >= 0:
                                    imgurl = input.getAttribute('value')
                                    if imgurl:
                                        logging.debug('found thumb photo:%s'%imgurl)
                                        mokomodel.photo.add_photo(imgurl, album.model, album)
                    result = True

    except urlfetch.Error, e:
        logging.error('fetch album has met error, url:%(url)s, error message:%(message)s'%{'url':album.url, 'message': e.message})

    return result
                  
def fetch_photo(url):
    """Download the photo at url (unquoted first) and return it as a
    db.Blob, or None on any fetch/deadline/quota failure."""
    try:
        url = urllib.unquote(url)
        response = urlfetch.fetch(url, headers = pheader)
        logging.debug('status_code:%d'%response.status_code)
        logging.debug(response.headers)
        if response.status_code == 200:
            return db.Blob(response.content)
    except urlfetch.Error, e:
        logging.error('DownloadError with loading photo :%(url)s, and error message:%(msg)s'%{'url':url, 'msg':e.message})
    except (runtime.DeadlineExceededError, apiproxy_errors.DeadlineExceededError):
        # both deadline flavours were logged identically; handle together
        logging.error('DeadlineExceededError with loading photo: %s' % url)
    except apiproxy_errors.OverQuotaError:
        logging.error('OverQuotaError with loading photo: %s' % url)
    return None

def fetch_avatar(m):
    """Download model m's avatar, persist it when the download yields data,
    and record success/failure in the crawler cron log."""
    if not m:
        return
    blob = fetch_photo(m.avatar)
    succeeded = bool(blob)
    if succeeded:
        mokomodel.rawphotodata.add_avatar_photo(m.avatar, blob, m)
    mokomodel.cronlog.add_crawler_avatar(m.username, succeeded)
              
def test_parse_dom(username):
    """Manual-testing convenience: re-crawl one model by username."""
    return update_model(username)
        