'''
Created on 2009-12-3

@author: wangyongtao
'''
from google.appengine.api import urlfetch
from google.appengine.ext import db
from google.appengine import runtime
from html5lib import html5parser
from html5lib import treebuilders
from datetime import date
import logging
import urlparse
import re
import datamodel
from BeautifulSoup import BeautifulSoup          # For processing HTML
import urllib
import Cookie

# HTTP header sets used by the fetch helpers.  _BDHeaders masquerades as
# Baiduspider (1cbn.com serves full content to search bots); _cbn_login()
# later injects a session 'Cookie' entry into this dict.
_BDHeaders = {'User-Agent': 'Baiduspider', 'Referer': 'http://www.1cbn.com',}
# Plain Firefox user agent, for pages that reject bot UAs.
_FireFoxHeader = {'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US; rv:1.9.0.12) Gecko/2009070611 Firefox/3.5.12', 'Referer': 'http://www.1cbn.com',}
# Firefox UA with a Sodexo search referer, for the Sodexo club site.
_SodexoHeader = {'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US; rv:1.9.0.12) Gecko/2009070611 Firefox/3.5.12', 'Referer': 'http://www.club.cn.sodexo.com/affiliates/search',}
# Template for a column's paged show listing (page number + CBN column id).
_ShowUrl = 'http://www.1cbn.com/leadership/itemlist.aspx?Page=%(page)d&columntype=0&columnId=%(colid)d'
# Credentials posted by _cbn_login().
_cbn_user = 'atia'
_cbn_pwd = '123456'
# 1cbn URL patterns.  Column page: group 2 = numeric column id.
_re_column_pattern ='(^http://www.1cbn.com/leadership/newcolumn.aspx\?columnid=)(\d+){1}$'
# Show item page: group 2 = column id, group 4 = show number.
_re_show_pattern ='(^http://www.1cbn.com/leadership/iteminfo.aspx\?columnid=)(\d+){1}(\&columnno=)(\d+){1}'
# Host (emcee) page: group 2 = emcee id.
_re_host_pattern = '(http://www.1cbn.com/cbninfo/emceeinfo.aspx\?emceeid=)(\d+){1}$'
_sodexo_entry_url = 'http://www.club.cn.sodexo.com/'
# Relative Sodexo links: '/node/<uid>' and '/affiliates/search?page=<n>'.
_re_sodexo_pattern = '(^/node/)(\d+)$'
_re_sodexo_page_pattern = '(^/affiliates/search\?page=)(\d+)$'
# Extracts latitude (group 2) and longitude (group 4) from the map JSON
# embedded in a Sodexo business-unit page.
_re_lonlat_pattern = '("id": "auto3map", "latitude": ")([1-9]\d*\.\d*|0\.\d*[1-9]\d*){1}(", "longitude": ")([1-9]\d*\.\d*|0\.\d*[1-9]\d*){1}'

class crawler(object):
    """Breadth-first crawler for 1cbn.com.

    Starting from a base URL it fetches pages, hands recognized
    column/show/host pages to the module-level ``_parse_*_dom`` helpers,
    and follows in-site links matching the known URL patterns, up to
    ``_MAX_COUNT`` pages per crawl session.
    """

    # (tag, attribute) pairs whose attribute value may carry a URL, per
    # the HTML 4 specification; used to discover outgoing links.
    _url_attributes = [
    ('a', 'href'),
    ('applet', 'codebase'),
    ('area', 'href'),
    ('blockquote', 'cite'),
    ('body', 'background'),
    ('del', 'cite'),
    ('form', 'action'),
    ('frame', 'longdesc'),
    ('frame', 'src'),
    ('iframe', 'longdesc'),
    ('iframe', 'src'),
    ('head', 'profile'),
    ('img', 'longdesc'),
    ('img', 'src'),
    ('img', 'usemap'),
    ('input', 'src'),
    ('input', 'usemap'),
    ('ins', 'cite'),
    ('link', 'href'),
    ('object', 'classid'),
    ('object', 'codebase'),
    ('object', 'data'),
    ('object', 'usemap'),
    ('q', 'cite'),
    ('script', 'src')]

    # Hard cap on pages fetched in one session (App Engine request
    # deadlines make unbounded crawls impossible).
    _MAX_COUNT = 50

    def __init__(self):
        # BUGFIX: these used to be *class* attributes, so every crawler
        # instance in the same process mutated and accumulated the same
        # shared lists (frontier and dedup queue).  They are now
        # per-instance state.
        self._columns = []       # kept for backward compatibility (unused here)
        self._viewedQueue = []   # URLs already enqueued, to avoid revisits
        self._instQueue = []     # frontier: URLs waiting to be fetched
        self._count = 0          # pages fetched so far in this session

    def crawler(self, base_url, simplecrawler):
        """Crawl starting at *base_url*.

        When *simplecrawler* is true only base_url itself is processed;
        otherwise discovered links are followed breadth-first until the
        frontier empties or _MAX_COUNT pages have been fetched.
        """
        link = base_url
        while link != '':
            dom = _get_dom(link)
            if dom:
                self._parse_dom(dom, link, base_url)
            if simplecrawler:
                link = ''
            else:
                link = self._get_next_link()

    def _get_next_link(self):
        """Pop the next frontier URL, or '' when exhausted or capped."""
        if self._instQueue == [] or self._count == self._MAX_COUNT:
            return ''
        self._count += 1
        return self._instQueue.pop(0)

    def _parse_dom(self, dom, self_url, base_url):
        """Dispatch a fetched page to the right parser and harvest links.

        self_url is the page's own URL (matched against the known
        patterns); base_url resolves relative links found on the page.
        """
        self_url = self_url.lower()
        match = re.match(_re_column_pattern, self_url)
        if match:
            # group(2) is the numeric column id captured by the pattern
            _parse_column_dom(dom, self_url, match.group(2))
        match = re.match(_re_show_pattern, self_url)
        if match:
            # group(2) = column id, group(4) = show number
            _parse_show_dom(dom, self_url, match.group(2), match.group(4))
        match = re.match(_re_host_pattern, self_url)
        if match:
            _parse_host_dom(dom, self_url, match.group(2))
        # Harvest every URL-bearing attribute and enqueue in-site links
        # that match one of the known page patterns, deduplicated.
        for tag, attr in self._url_attributes:
            for e in dom.getElementsByTagName(tag):
                u = e.getAttribute(attr)
                if u:
                    url = urlparse.urljoin(base_url, u).lower()
                    if (re.match(_re_column_pattern, url)
                            or re.match(_re_host_pattern, url)
                            or re.match(_re_show_pattern, url)):
                        if url not in self._viewedQueue:
                            self._instQueue.append(url)
                            self._viewedQueue.append(url)
            
def _get_dom(url, header=_BDHeaders):
        """Fetch *url* and parse it into a DOM tree with html5lib.

        header defaults to the shared module-level _BDHeaders dict,
        which _cbn_login() mutates to carry the session cookie -- so
        later calls are automatically authenticated.

        On HTTP 200 the parsed DOM is returned.  On HTTP 302 (the site
        redirects unauthenticated clients to its login page) the login
        page is fetched, its __VIEWSTATE token scraped, and the CBN
        login performed, which stores the session cookie into
        _BDHeaders.  NOTE(review): the 302 branch never returns a DOM
        for the original URL -- presumably the caller is expected to
        retry after the login; confirm.

        Returns None on any other status or on a fetch error.
        """
        try:
            fetch = urlfetch.fetch(url, headers = header, follow_redirects=False)
            if fetch.status_code == 200:
                p = html5parser.HTMLParser(tree = treebuilders.getTreeBuilder('dom'))
                dom = p.parse(fetch.content)
                return dom
            elif fetch.status_code == 302:
                fetch = urlfetch.fetch(url, headers = header)
                p = html5parser.HTMLParser(tree = treebuilders.getTreeBuilder('dom'))
                dom = p.parse(fetch.content)
                vs = _get_viewstate(dom)
                _cbn_login(_cbn_user, _cbn_pwd, vs)
        except urlfetch.Error, e:
            logging.error('URL fetch has met exception: %s' % e.message)
        
        return None

def _get_bsoup_dom(url, header=_BDHeaders):
    """Fetch *url* (without following redirects) and return a
    BeautifulSoup tree on HTTP 200, or None on any other status or
    fetch error."""
    try:
        response = urlfetch.fetch(url, headers=header, follow_redirects=False)
    except urlfetch.Error, e:
        logging.error('URL fetch has met exception: %s' % e.message)
        return None
    if response.status_code == 200:
        return BeautifulSoup(response.content)
    return None

def _get_rawcontent(url, header=_SodexoHeader):
    """Fetch *url* and return the raw response body on HTTP 200, else None.

    BUGFIX: the *header* argument used to be accepted but silently
    ignored (urlfetch.fetch was called without it); it is now actually
    sent with the request, as callers such as fetch_business_unit expect.
    """
    try:
        fetch = urlfetch.fetch(url, headers=header)
        if fetch.status_code == 200:
            return fetch.content
    except urlfetch.Error, e:
        logging.error('URL fetch has met exception: %s' % e.message)
    return None

def _getText(node):
    rc = ''
    for n in node.childNodes:
        if n.nodeType == node.TEXT_NODE:
            rc = rc + n.data
        elif n.childNodes:
            rc = rc + _getText(n)
    return rc.strip()

def _parse_column_dom(dom, url, columnID, fetchItem = False):
    """Extract a CBN column page and persist it via datamodel.cbncolumn.

    Scrapes the column title, description, cover image and host name
    from well-known span/table/anchor ids on the page.

    dom       -- html5lib DOM of the column page
    url       -- absolute URL of the page (resolves relative links)
    columnID  -- numeric column id captured from the URL
    fetchItem -- when True, also fetch and parse every show linked from
                 the 'columnitemlist' span (one extra HTTP request per
                 show)
    """
    spans = dom.getElementsByTagName('span')
    desc = None
    title = None
    cover = None
    host = None
    # The page marks its interesting fragments with fixed span ids.
    for span in spans:
        id = span.getAttribute('id')
        if id and id.lower() == 'columnnamebig':
            title = _getText(span)
        if id and id.lower() == 'columncontent':
            desc = _getText(span)
        if id and id.lower() == 'columnitemlist':
            if fetchItem:
                itemlinks = span.getElementsByTagName('a')
                for lnk in itemlinks:
                    showurl = urlparse.urljoin(url, lnk.getAttribute('href'))
                    match = re.match(_re_show_pattern, showurl.lower())
                    if match:
                        showdom = _get_dom(showurl)
                        if showdom:
                            # groups(1)[3] is the 4th captured group:
                            # the show number in the item URL.
                            _parse_show_dom(showdom, showurl, columnID, match.groups(1)[3])
    # The cover image lives inside the table with id 'tvshow'.
    tables = dom.getElementsByTagName('table')
    for t in tables:
        id = t.getAttribute('id')
        if id and id.lower() == 'tvshow':
            imgs = t.getElementsByTagName('img')
            for img in imgs:
                cover = urlparse.urljoin(url, img.getAttribute('src')) 
    # The host's name is the text of the anchor with class 'toplink'.
    alinks = dom.getElementsByTagName('a')
    for a in alinks:
        cl = a.getAttribute('class')
        if cl and cl.lower() == 'toplink':
            host = _getText(a)
    # NOTE(review): title may still be None here; add_cbncolumn is
    # called regardless -- confirm datamodel tolerates a None title.
    column = datamodel.cbncolumn.add_cbncolumn(title, columnID, title, url)
    if cover or desc or host:
        if host:
            column.host = host
        if cover:
            column.cover = db.Link(cover)
        if desc:
            column.description = desc
        column.put()
    return
def _parse_host_dom(dom, url, hostID):
    """Extract a TV host's name, bio and avatar from an emcee page and
    persist them via datamodel.host.

    The page marks the fields with fixed span ids: 'labelname' (name),
    'labelintro' (bio) and 'labelimg' (avatar image container).
    """
    title = None
    desc = None
    avatar = None
    for span in dom.getElementsByTagName('span'):
        span_id = span.getAttribute('id')
        if not span_id:
            continue
        span_id = span_id.lower()
        if span_id == 'labelname':
            title = _getText(span)
        elif span_id == 'labelintro':
            desc = _getText(span)
        elif span_id == 'labelimg':
            # Resolve the avatar image relative to the page URL.
            for img in span.getElementsByTagName('img'):
                avatar = urlparse.urljoin(url, img.getAttribute('src'))

    host = datamodel.host.add_host(title, hostID, title, url)
    # Only write back when there is something beyond the bare record.
    if desc or avatar:
        if desc:
            host.description = desc
        if avatar:
            host.avatar = db.Link(avatar)
        host.put()
    return
def _parse_show_dom(dom, url, columnID, showID):
    """Extract a show item page and persist it via datamodel.cbnshow.

    The streaming (mms) URL is taken from the <param name="url"> inside
    the media <object>; the column name, show title and air date come
    from spans with fixed ids.  Nothing is stored unless both a media
    URL and a title are found.

    dom      -- html5lib DOM of the show page
    url      -- absolute URL of the page
    columnID -- numeric column id from the URL
    showID   -- show number from the URL
    """
    mmsurl = None
    for obj in dom.getElementsByTagName('object'):
        for param in obj.getElementsByTagName('param'):
            name = param.getAttribute('name')
            if name and name.lower() == 'url':
                mmsurl = param.getAttribute('value')
    if mmsurl:
        column = None
        title = None
        showtime = None
        for span in dom.getElementsByTagName('span'):
            span_id = span.getAttribute('id')
            if span_id:
                span_id = span_id.lower()
                if span_id == 'columnname':
                    column = _getText(span)
                elif span_id == 'columnitemname':
                    title = _getText(span)
                elif span_id == 'playtime':
                    showtime = _getText(span)
        if title:
            s = datamodel.cbnshow.add_cbnshow(showID, columnID, title, column, url, mmsurl)
            # BUGFIX: showtime is None when the page carries no
            # 'playtime' span, which crashed on .split(); guard it.
            splits = showtime.split('-') if showtime else []
            if len(splits) == 3:
                year = int(splits[0])
                month = int(splits[1])
                day = int(splits[2])
                s.showtime = date(year, month, day)
                s.put()
        else:
            logging.error('unexpected show:%s'%url)
    return

def _cbn_login(user, password, viewstate):
    """Log in to 1cbn.com by POSTing its ASP.NET login form.

    viewstate is the __VIEWSTATE token scraped from the login page (see
    _get_viewstate); ASP.NET rejects postbacks without it.  On success
    the session cookie from the response is written into the shared
    module-level _BDHeaders dict, so subsequent fetches that use the
    default header set are authenticated.
    """
    logging.debug('automatic login request')
    login_uri = 'http://www.1cbn.com/LogIn.aspx'
    # Field names mirror the ASP.NET form: __EVENTTARGET simulates
    # clicking the 'lblLogin' control.
    form_fields = {'tbUserName':user, 
                   'tbPassword':password, 
                   '__EVENTTARGET':'lblLogin',
                   '__EVENTARGUMENT':'',
                   '__VIEWSTATE':viewstate,
                   'chkRemember':'on'}
    form_data = urllib.urlencode(form_fields)
    result = urlfetch.fetch(url=login_uri,
                            payload=form_data,
                            method=urlfetch.POST,
                            headers={'Content-Type': 'application/x-www-form-urlencoded', 'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US; rv:1.9.0.12) Gecko/2009070611 Firefox/3.5.12', 'Referer': 'http://www.1cbn.com'})
    cookie = Cookie.SimpleCookie(result.headers.get('set-cookie', '')) 
    if cookie:
        # Mutates the shared header dict used by _get_dom by default.
        _BDHeaders['Cookie'] = _make_cookie_header(cookie)

def _make_cookie_header(cookie):
    ret = ""
    for val in cookie.values():
        ret+="%s=%s; "%(val.key, val.value)
    return ret      
def _get_viewstate(dom):
    inputs = dom.getElementsByTagName('input')
    for i in inputs:
        name = i.getAttribute('name')
        if name and name == '__VIEWSTATE':
            vs = i.getAttribute('value')
            return vs
    return None

def get_sodex_pagenum(url):
    """Return the last page number of the Sodexo affiliate search
    listing at *url*, or -1 when it cannot be determined.

    The number is read from the 'last page' pager link, whose href has
    the form '/affiliates/search?page=<n>'.
    """
    if not url:
        return -1
    soup = _get_bsoup_dom(url)
    if not soup:
        return -1
    last = soup.find('li', {'class': 'pager-last last'})
    if not last:
        return -1
    logging.info('found last li')
    anchor = last.find('a')
    if not anchor:
        return -1
    href = anchor['href']
    logging.info('href:%s'%href)
    #/affiliates/search?page=153
    match = re.match(_re_sodexo_page_pattern, href)
    if not match:
        return -1
    page = match.group(2)
    logging.info('page count:%s'%page)
    return int(page)

def fetch_sodexo_page(url):
    """Parse one page of Sodexo affiliate search results and store each
    business unit (uid, link, title, phone) via datamodel.sodexounit.

    Returns False when *url* is missing; otherwise returns None.
    """
    if not url: return False
    soup = _get_bsoup_dom(url)
    if soup:
        # Results live in the 'views-table' table, one unit per row.
        table = soup.find('table', {'class' : 'views-table'})
        if table:
            tbody = table.find('tbody')
            if tbody:
                trs = tbody.findAll('tr')
                for tr in trs:
                    uid = None
                    link = None
                    title = None
                    item = tr.find('a')
                    if item:
                        href = item['href']
                        if href:
                            link = db.Link(urlparse.urljoin(url, href))
                            # hrefs look like '/node/<uid>'.
                            match = re.match(_re_sodexo_pattern, href)
                            if match:
                                uid = match.groups(1)[1]
                            # NOTE(review): BeautifulSoup raises KeyError
                            # for a missing attribute; this assumes the
                            # 'title' (and fallback 'alt') attributes are
                            # always present on the anchor -- confirm.
                            title = item['title']
                            if title is None:
                                title = item['alt']
                    phone = tr.find('div', {'class': 'phone'})
                    if uid and link:
                        shop = datamodel.sodexounit.add_unit(uid, link, title)
                        if phone and shop.telephone is None:
                            tel = phone.contents[1].string
                            # 8-digit numbers are local Shanghai numbers;
                            # prefix the 021 area code.
                            if len(tel) == 8:
                                tel = '021%s'%tel
                            shop.telephone = unicode(tel)
                            shop.put()

def fetch_business_unit(uid):
    """Fetch a Sodexo business-unit page and look for its map coordinates.

    uid is the unit id (may be a string).  If the unit is already stored
    its saved URL is used; otherwise the URL is built from the id.
    Currently the coordinates are only logged, not persisted.
    """
    # BUGFIX: uid may be a string (the code below does int(uid)), so the
    # old '%d' format raised TypeError.
    logging.info('fetch business unit:%s'%uid)
    unit = datamodel.sodexounit.get_unit_by_id(uid)
    if not unit: 
        logging.info('not found unit in data store')
        url = 'http://www.club.cn.sodexo.com/node/%d'%int(uid)
    else:
        url = unit.url
    logging.info('start fetch unit:%s'%url)
    raw = _get_rawcontent(url, _SodexoHeader)
    if raw:
        logging.info(raw)
        # BUGFIX: re.match anchors at the start of the string, so the
        # lat/lon snippet buried inside the page body could never match;
        # re.search scans the whole document.
        match = re.search(_re_lonlat_pattern, raw)
        if match:
            logging.info('found match')
            # BUGFIX: group(2) is the latitude and group(4) the
            # longitude; the old log printed latitude labelled 'long'.
            logging.info('latitude:%s longitude:%s'%(match.group(2), match.group(4)))
        else:
            logging.info('no match')   
    else:
        logging.info('dom is none')

def fetch_show(column):
    """Fetch one listing page of shows for *column* and parse the shows
    not yet stored.

    The page number to fetch comes from the crawl log
    (datamodel.log.get_showpage).  A page that yields items advances the
    log; an empty page marks the column's show crawl as complete.  A
    DeadlineExceededError records a failed attempt so the same page is
    retried on the next run.
    """
    page = datamodel.log.get_showpage(column)
    url = _ShowUrl%{'page': page, 'colid':column.cbnid}
    logging.debug('fetch show url:%s'%url)
    dom = _get_dom(url)
    if dom:
        items = []
        alinks = dom.getElementsByTagName('a')
        try:
            # Show links are the anchors styled with class 'itemlist'.
            for a in alinks:
                ac = a.getAttribute('class')
                if ac and ac.lower() == 'itemlist':
                    itemurl = urlparse.urljoin(url, a.getAttribute('href'))
                    match = re.match(_re_show_pattern, itemurl.lower())
                    if match:
                        # groups(1)[1]/[3] are the 2nd/4th captured
                        # groups: the column id and show number.
                        colid = match.groups(1)[1]
                        showid = match.groups(1)[3]
                        logging.debug('found show item:%s'%itemurl)
                        items.append(itemurl)
                        s = datamodel.cbnshow.show_exist(showid, colid)
                        if not s:
                            # Only fetch detail pages for unseen shows.
                            showdom = _get_dom(itemurl)
                            if showdom:
                                _parse_show_dom(showdom, itemurl, colid, showid)
            
            if len(items) > 0:
                datamodel.log.add_showpage(column, page, True)
            else:
                # Empty listing page: we ran past the last page.
                datamodel.log.add_crawler_show(column.urlcbn, True)
        except runtime.DeadlineExceededError, e:
                logging.error('fetch show has met deadline error:%s'%e.message)
                logging.debug('the column is:%s'%column.title_cn)
                datamodel.log.add_showpage(column, page, False)
    else:
        logging.error('return none dom for url:%s'%url)

def fetch_column(column):
    """Crawl a single column's page (no link following).

    Returns True on success, False when the column is missing, has no
    URL, or the fetch raises a urlfetch error.
    """
    if not column: return False
    if not column.urlcbn:
        logging.error('the specified column has no url')
        return False
    try:
        logging.debug('fetch column:%s'%column.urlcbn)
        spider = crawler()
        # simplecrawler=True: process only this URL, do not follow links.
        spider.crawler(column.urlcbn, True)
        return True
    except urlfetch.Error, e:
        logging.error('fetch column has met error, url:%(url)s, error message:%(message)s'%{'url':column.urlcbn, 'message': e.message})
        return False

def update_column(id):
    """Re-crawl the stored column with CBN id *id*.

    Returns True on success, False when the id is missing, the column is
    not stored, or its page cannot be fetched.
    """
    if not id: return False
    id = int(id)
    c = datamodel.cbncolumn.get_column_cbnid(id)
    if not c: return False
    coldom = _get_dom(c.urlcbn)
    if not coldom:
        return False
    # BUGFIX: the old code ran _parse_column_dom twice, first with
    # fetchItem=False and then with fetchItem=True; the second pass
    # repeats every datastore write of the first plus the item fetching,
    # so the first pass was pure duplicated work.
    _parse_column_dom(coldom, c.urlcbn, id, True)
    # BUGFIX: success used to fall through returning None (falsy) while
    # the failure paths return False; report success explicitly.
    return True
              