#coding=utf8

import os, wsgiref.handlers, logging, time, datetime, urllib
from google.appengine.ext import webapp
from google.appengine.api import urlfetch
from google.appengine.ext import db
from google.appengine.ext.webapp import template
from google.appengine.api import memcache
from BeautifulSoup import BeautifulSoup, SoupStrainer
from rsstool.model import *


# True when running on the local development server.  Use .get() so the
# module can also be imported where SERVER_SOFTWARE is not set at all
# (unit tests, offline tools) instead of raising KeyError.
B_DEV = os.environ.get('SERVER_SOFTWARE', '').startswith('Dev')

if B_DEV:
    CACHE_TIMEOUT = 1       # 1 second: effectively disable caching while developing
else:
    CACHE_TIMEOUT = 10800   # 3 hours

class BaseHtmlHandler(object):
    '''Base class for the HTML-scraping ATOM feed generators.

    Subclasses set self.url and self.entryhead in __init__ and implement
    htmlParser() / getAtom().
    '''
    PERMALINK_PREFIX = 'http://itool.appspot.com/rsstool/urlmapping/'

    def __init__(self):
        self.url = None
        self.html = None
        self.atom = None            # ATOM string, output to user browser of GR
        self.entryhead = None       # a data structure, header of ATOM
        self.entrylist = None       # a data structure, entrylist of ATOM

    def getHtml(self):
        '''Fetch the page at self.url and cache it in self.html.

        On the development server a saved fixture file is read instead of
        hitting the network.  Returns self.html, which stays None when the
        fetch fails or returns a non-200 status.
        '''
        if B_DEV: # development server: read a saved page from disk
            h = open('gougousearch-1.htm', 'r')
            try:
                self.html = h.read()
            finally:
                # Close the handle even if read() raises.
                h.close()
        else: # real server
            if self.url:
                try:
                    result = urlfetch.fetch(self.url)
                    if result.status_code == 200:
                        self.html = result.content
                except Exception:
                    # Keep the best-effort contract (caller sees html=None),
                    # but record the traceback instead of swallowing it.
                    logging.exception('fetch error - ' + self.url)

        return self.html

    def genPermalink(self, url):
        '''Return a stable short permalink for url.

        Each distinct url is assigned a monotonically increasing integer,
        allocated transactionally inside the UrlMapIndex/UrlMap entity
        group; repeated calls with the same url reuse the stored index.
        '''
        def txn():
            # Allocate the next index inside a transaction so concurrent
            # requests cannot hand out the same number twice.
            index_entity = UrlMapIndex.get_by_key_name('rsstool')
            if index_entity is None:
                index_entity = UrlMapIndex(key_name='rsstool')
            new_index = index_entity.max_index
            index_entity.max_index += 1
            index_entity.put()

            new_url_mapping = UrlMap(
                parent=index_entity,
                key_name=url,
                index=new_index,
                url=url
            )
            new_url_mapping.put()
            return new_index

        url_mapping = UrlMap.get_by_key_name(
            key_names=url,
            parent=db.Key.from_path('UrlMapIndex', 'rsstool')
        )
        if url_mapping is None:
            i = db.run_in_transaction(txn)
        else:
            i = url_mapping.index

        return BaseHtmlHandler.PERMALINK_PREFIX + str(i)

    def htmlParser(self):
        '''Parse self.html into self.entrylist; implemented by subclasses.'''
        pass

    def getAtom(self):
        '''Return the rendered ATOM string; implemented by subclasses.'''
        pass

class GeneralHtmlHandler(BaseHtmlHandler):
    '''Template-method variant of BaseHtmlHandler.

    Subclasses only override doParse() to fill self.entrylist; fetching,
    template rendering and memcache handling live here.
    '''

    def __init__(self):
        BaseHtmlHandler.__init__(self)

    def htmlParser(self):
        '''Ensure the HTML is loaded, then delegate to doParse().'''
        if not self.html:
            self.getHtml()
        self.doParse()
        return self.entrylist

    def getAtom(self):
        '''Render the ATOM feed, serving from memcache when possible.'''
        cache_key = self.__class__.__name__
        output = memcache.get(cache_key)
        if output is None:
            if not self.entrylist:
                self.htmlParser()

            template_path = os.path.join(os.path.dirname(__file__), 'atom.xml')
            output = template.render(template_path, {
                'entryhead': self.entryhead,
                'entrylist': self.entrylist,
            })

            # add() is enough: the key is absent either because it expired
            # or was never rendered.
            memcache.add(key=cache_key, value=output, time=CACHE_TIMEOUT)
        else:
            logging.info(cache_key + ' - from memcache')

        return output

    def doParse(self):
        '''Fill self.entrylist from self.html; implemented by subclasses.'''
        pass

# TODO: 1. Make each URL uniquely identified in the datastore
# TODO: 2. Feed validation (format of each entry's submission time)
# TODO: 3. The search page each item's URL points to is wrong; add it to url_mapping
class GougouLatestFilmHandler(BaseHtmlHandler):
    '''Latest-film feed for gougou.com's ranking page.

    Two modes of operation:
      * goSpider() parses the live page and persists entries into the
        GougouLatestFilm model (called from a cron-style endpoint).
      * getAtom() renders the feed from the datastore, newest first.
    '''
    def __init__(self):
        BaseHtmlHandler.__init__(self)
        self.url = 'http://www.gougou.com/rank/top_9_0.html'
        
        # initial entryhead (static feed metadata; last_update is "now")
        self.entryhead = {
            'title':u'狗狗影视-最新电影',
            'root_url':'http://www.gougou.com/rank/top_9_0.html',
            'description':u'狗狗影视-最新电影',
            'atom_url':'http://itool.appspot.com/rsstool/gougoulatestfilm.atom',
            'last_update':(datetime.datetime.now()).strftime('%Y-%m-%dT%H:%M:%SZ'),
            'author':'A.TNG',
        }
    
    def htmlParser(self):
        '''Parse the ranking page into self.entrylist and return it.

        Each entry carries 'title', 'permalink' (shortened through
        genPermalink) and 'content' (the item HTML minus its <span>).
        '''
        if not self.html:
            self.getHtml()

        # Grab everything inside the <div class='phbox'> blocks
        phbox = SoupStrainer('div', {'class':'phbox'}) 
        phlist = [m for m in BeautifulSoup(self.html, parseOnlyThese=phbox, fromEncoding='GBK')]
        
        self.entrylist = []
        for item in phlist:
            entry = {}
            
            # construct title: "<film name> (<size/quality>)"
            entry['title'] = u'%s (%s)' % \
                (item.li.a.string, item.li.strong.string)
            # construct permalink
#            entry['permalink'] = u'http://www.gougou.com/search?search=%s' % \
#                item.li.a.string+' '+item.li.strong.string
            entry['permalink'] = self.genPermalink(item.li.a['href'])
            # construct content: extract() mutates item in place, removing the
            # <span> before the whole item is serialized below
            tag_span = item.span
            tag_span.extract()
            entry['content'] = unicode(item)
            self.entrylist.append(entry)
                        
        return self.entrylist
        
    def saveEntry(self, title, permalink, content):
        '''Insert one entry keyed by permalink.

        Existing entries are left untouched (insert-only), so re-crawling
        the same page is idempotent.
        '''
        entry = GougouLatestFilm.get_by_key_name(permalink)
        if entry is None:
            new_entry = GougouLatestFilm(
                key_name=permalink,
                title=title,
                permalink=permalink,
                content=db.Text(content)
            )
            new_entry.put()

    def saveToDatastroe(self):
        '''Parse (if needed) and persist every entry in self.entrylist.'''
        # NOTE(review): method name is a typo of "saveToDatastore"; kept
        # as-is to avoid breaking any external callers.
        if not self.entrylist:
            self.htmlParser()
            
        for entry in self.entrylist:
            self.saveEntry(entry['title'], entry['permalink'], entry['content'])
    
    def goSpider(self):
        '''Crawl entry point used by the spider request handler.'''
        self.saveToDatastroe()  
    
    def getAtom(self):
        '''Render the 25 most recent datastore entries as ATOM.'''
        path = os.path.join(os.path.dirname(__file__), 'atom.xml')
        entries = GougouLatestFilm.gql('ORDER BY update_time DESC').fetch(25)
        for e in entries:
            # Encode titles for the template; mutates in-memory copies only
            # (nothing is put() back to the datastore).
            e.title = e.title.encode('utf-8')
#            e.content = unicode(e.content).encode('utf-8')
        output = template.render(path, {
            'entryhead': self.entryhead,
            'entrylist': entries
        })
        return output

# TODO: 1. Strip the HTML tags from the title if possible
# TODO: 2. Pass feed validation
class GougouSearchHandler(BaseHtmlHandler):
    '''ATOM feed of gougou.com search results for a given keyword.'''

    # Per-entry content body: quality / size / format of the film.
    content_template = u'''
    <ul>
    <li><strong>影片质量: </strong>%s</li>
    <li><strong>影片大小：</strong>%s</li>
    <li><strong>影片格式：</strong>%s</li>
    </ul>
    '''

    def __init__(self, keyword=''):
        '''keyword: unicode search term; encoded as GBK for the site.'''
        BaseHtmlHandler.__init__(self)
        self.url = 'http://www.gougou.com/search?search=%s' % \
            urllib.quote(keyword.encode('GBK'))

        self.keyword = keyword
        # initial entryhead
        self.entryhead = {
            'title':u'狗狗影视搜索 - %s' % keyword,
            'root_url':'http://www.gougou.com',
            'description':u'狗狗影视搜索 - %s (ATOM格式输出)' % keyword,
            'atom_url':'',
            'last_update':(datetime.datetime.now()).strftime('%Y-%m-%dT%H:%M:%SZ'),
            'author':'A.TNG',
        }

    def htmlParser(self):
        '''Parse the search-result table into self.entrylist.

        Returns the list, or None when the HTML could not be fetched or
        the expected table is missing.
        '''
        if not self.html:
            self.getHtml()

        # fetch html error
        if self.html is None:
            return

        # Results live in
        # <table border="0" cellspacing="0" cellpadding="0" class="ggTable">
        self.entrylist = []
        mytable = SoupStrainer('table', {'class':'ggTable'})
        li = [m for m in BeautifulSoup(self.html, parseOnlyThese=mytable,
            fromEncoding='GBK')]
        if (1 == len(li)):
            ggtable = li[0]
        elif (2 == len(li)):
            # When two ggTable tables are present the second holds the results.
            ggtable = li[1]
        else:
            logging.info('gougou search, not one or two table with class ggTable')
            return
        for tag_tr in ggtable.contents:
            b_item = False
            try:
                # Rows that carry a download link are real result rows.
                b_item = (tag_tr.td.a is not None)
            except AttributeError:
                pass  # NavigableString or header row: has no td/a attributes

            if b_item:
                td_list = tag_tr.findAll('td')
                try:
                    entry = {
                        'title': unicode(td_list[1].a),
                        'permalink': self.genPermalink(td_list[1].a['href']),
                        'content': GougouSearchHandler.content_template % (
                            td_list[3].div['class'],
                            td_list[4].string,
                            td_list[5].string,
                        ),
                    }
                except Exception:
                    # Bug fix: the old code appended the (possibly empty or
                    # partial) entry even when parsing raised, producing
                    # broken feed items.  Skip unparsable rows instead.
                    logging.info(self.__class__.__name__+'-'+self.keyword)
                else:
                    self.entrylist.append(entry)

        return self.entrylist

    def getAtom(self):
        '''Render the feed, serving from memcache when a cached copy exists.

        Cache key includes the keyword so different searches do not collide.
        '''
        output = memcache.get(self.__class__.__name__+self.keyword)
        if output is not None:
            logging.info(self.__class__.__name__+self.keyword+' - from memcache')
        else:
            if not self.entrylist:
                self.htmlParser()

            path = os.path.join(os.path.dirname(__file__), 'atom.xml')
            output = template.render(path, {
                'entryhead': self.entryhead,
                'entrylist': self.entrylist,
            })

            memcache.add(self.__class__.__name__+self.keyword, value=output,
                time=CACHE_TIMEOUT)

        return output
    
class DabagirlHandler(BaseHtmlHandler):
    '''ATOM feed for the dabagirl.co.kr front-page product list.'''

    def __init__(self):
        BaseHtmlHandler.__init__(self)
        self.url = 'http://www.dabagirl.co.kr'
        # Static feed header; last_update is the time the feed is built.
        self.entryhead = {
            'title': 'DabaGirl - Korean',
            'root_url': 'http://www.dabagirl.co.kr',
            'description': 'DabaGirl - Korean',
            'atom_url': '',
            'last_update': datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%SZ'),
            'author': 'A.TNG',
        }

    def htmlParser(self):
        '''Collect at most the first 30 product cells (<td width="25%">)
        into self.entrylist.'''
        if not self.html:
            self.getHtml()

        # fetch failed: leave entrylist untouched
        if self.html is None:
            return

        cell_filter = SoupStrainer('td', {'width': '25%'})
        soup = BeautifulSoup(self.html, parseOnlyThese=cell_filter, fromEncoding='euc-kr')

        self.entrylist = []
        for position, cell in enumerate(soup):
            if position >= 30:
                continue
            self.entrylist.append({
                'title': cell.find('font'),
                'permalink': self.genPermalink(self.entryhead['root_url'] + cell.find('a')['href']),
                'content': unicode(cell),
            })

    def getAtom(self):
        '''Render the feed, serving from memcache when a cached copy exists.'''
        cache_key = self.__class__.__name__
        output = memcache.get(cache_key)
        if output is None:
            if not self.entrylist:
                self.htmlParser()

            template_path = os.path.join(os.path.dirname(__file__), 'atom.xml')
            output = template.render(template_path, {
                'entryhead': self.entryhead,
                'entrylist': self.entrylist,
            })

            memcache.add(key=cache_key, value=output, time=CACHE_TIMEOUT)
        else:
            logging.info(cache_key + ' - from memcache')

        return output

class YoukuSearchHandler(BaseHtmlHandler):
    '''ATOM feed of youku.com video search results for a given keyword.'''

    def __init__(self, keyword=''):
        '''keyword: unicode search term; encoded as UTF-8 for the site.'''
        BaseHtmlHandler.__init__(self)
        self.url = 'http://so.youku.com/search_video/q_%s' % \
            urllib.quote(keyword.encode('UTF-8'))

        self.keyword = keyword
        # initial entryhead
        self.entryhead = {
            'title': u'Youku搜索 - %s' % keyword,
            'root_url': 'http://www.youku.com',
            'description': u'Youku搜索 - %s (ATOM格式输出)' % keyword,
            'atom_url': '',
            'last_update': (datetime.datetime.now()).strftime('%Y-%m-%dT%H:%M:%SZ'),
            'author': 'A.TNG',
        }

    def htmlParser(self):
        '''Parse up to 10 results from the <ul class='video'> list into
        self.entrylist.  Returns nothing; leaves entrylist unset on a
        failed fetch.'''
        if not self.html:
            self.getHtml()

        # fetch html error
        if self.html is None:
            return

        myul = SoupStrainer('ul', {'class': 'video'})

        # only the first 10 items make it into the feed
        item_cnt = 10
        self.entrylist = []
        for item in BeautifulSoup(self.html, parseOnlyThese=myul, fromEncoding='UTF-8'):
            if item_cnt > 0:
                try:
                    tag_li_list = item.findAll('li')
                    tag_add_button = tag_li_list[1]
                    tag_li_title = tag_li_list[2]

                    entry = {}
                    # title: "<video title> - <duration/extra>"
                    entry['title'] = u'%s - %s' % \
                        (tag_li_title.h1.a.string, tag_li_title.h1.span.string)

                    # permalink (shortened via genPermalink)
                    entry['permalink'] = self.genPermalink(tag_li_title.h1.a['href'])

                    # content: drop the "add" button before serializing the item
                    tag_add_button.extract()
                    entry['content'] = unicode(item)
                except Exception:
                    # Bug fix: the old code appended the entry even when
                    # parsing raised (leaving it empty or partial); skip
                    # unparsable items instead.
                    logging.info(self.__class__.__name__+'-'+self.keyword)
                else:
                    self.entrylist.append(entry)

            item_cnt -= 1

    def getAtom(self):
        '''Render the feed, serving from memcache when a cached copy exists.

        Cache key includes the keyword so different searches do not collide.
        '''
        output = memcache.get(self.__class__.__name__+self.keyword)
        if output is not None:
            logging.info(self.__class__.__name__+self.keyword+' - from memcache')
        else:
            if not self.entrylist:
                self.htmlParser()

            path = os.path.join(os.path.dirname(__file__), 'atom.xml')
            output = template.render(path, {
                'entryhead': self.entryhead,
                'entrylist': self.entrylist,
            })

            memcache.add(self.__class__.__name__+self.keyword, value=output,
                time=CACHE_TIMEOUT)

        return output
    
class CherrykokoHandler(GeneralHtmlHandler):
    '''ATOM feed for the cherrykoko.com front-page product list.'''

    def __init__(self):
        GeneralHtmlHandler.__init__(self)
        self.url = 'http://www.cherrykoko.com'
        # Static feed header; last_update is the time the feed is built.
        self.entryhead = {
            'title': 'Cherrykoko - Korean',
            'root_url': 'http://www.cherrykoko.com',
            'description': 'Cherrykoko - Korean',
            'atom_url': '',
            'last_update': datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%SZ'),
            'author': 'A.TNG',
        }

    def doParse(self):
        '''Extract up to 30 products from <div id="productList"> into
        self.entrylist.'''
        if self.html is None:
            return

        product_filter = SoupStrainer('div', {'id': 'productList'})
        soup = BeautifulSoup(self.html, parseOnlyThese=product_filter, fromEncoding='euc-kr')

        self.entrylist = []
        for position, product in enumerate(soup):
            if position >= 30:
                continue
            paragraphs = product.findAll('p')
            title = unicode(paragraphs[1].a.string)
            absolute_url = self.entryhead['root_url'] + paragraphs[1].a['href']
            permalink = self.genPermalink(absolute_url)

            # Rewrite the relative links so the feed content points back at
            # the shop, then serialize the whole product cell.
            paragraphs[0].a['href'] = absolute_url
            paragraphs[1].a['href'] = absolute_url
            self.entrylist.append({
                'title': title,
                'permalink': permalink,
                'content': unicode(product),
            })
            
class StyleonmeHandler(GeneralHtmlHandler):
    '''ATOM feed for the styleonme.com front-page product grid.'''

    def __init__(self):
        GeneralHtmlHandler.__init__(self)
        self.url = 'http://www.styleonme.com'
        # Static feed header; last_update is the time the feed is built.
        self.entryhead = {
            'title': 'Sytleonme - Korean',
            'root_url': 'http://www.styleonme.com',
            'description': 'Styleonme - Korean',
            'atom_url': '',
            'last_update': datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%SZ'),
            'author': 'A.TNG',
        }

    def doParse(self):
        '''Collect up to 50 product cells (<td width="20%">) into
        self.entrylist.'''
        if self.html is None:
            return

        cell_filter = SoupStrainer('td', {'width':'20%'})
        soup = BeautifulSoup(self.html, parseOnlyThese=cell_filter, fromEncoding='euc-kr')

        self.entrylist = []
        for position, cell in enumerate(soup):
            if position >= 50:
                continue
            anchors = cell.findAll('a')
            title = unicode(anchors[1].string)
            absolute_url = self.entryhead['root_url'] + anchors[0]['href']
            permalink = self.genPermalink(absolute_url)

            # Point both anchors at the absolute product URL before serializing.
            anchors[0]['href'] = absolute_url
            anchors[1]['href'] = absolute_url
            self.entrylist.append({
                'title': title,
                'permalink': permalink,
                'content': unicode(cell),
            })

class MainHandler(webapp.RequestHandler):
    '''Landing handler; currently a memcache smoke test.'''

    def get(self):
        cache_key = u'key中文'
        data = memcache.get(cache_key)
        if data is None:
            data = 'memcache test data'
            memcache.add(key=cache_key, value=data, time=5)
        else:
            self.response.out.write('from memcache<br>')

        self.response.out.write(data)

class GougouLatestFilmSpider(webapp.RequestHandler):
    '''Cron endpoint: crawl the latest-film page into the datastore.'''

    def get(self):
        GougouLatestFilmHandler().goSpider()
        # Response body is the auth code expected by schdulerservice.appspot.com.
        self.response.out.write('a5arbw2fgc3wg7njjricrt2l4x4sfgt9')
        timestamp = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%SZ')
        logging.info('GougouLatestFilmSpider-' + timestamp)
        
class GougouLatestFilmAtom(webapp.RequestHandler):
    '''Serve the latest-film ATOM feed rendered from the datastore.'''

    def get(self):
        handler = GougouLatestFilmHandler()
        self.response.out.write(handler.getAtom())
        timestamp = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%SZ')
        logging.info('GougouLatestFilmAtom-' + timestamp)
    
class GougouSearchAtom(webapp.RequestHandler):
    '''Serve an ATOM feed of gougou.com search results for ?keyword=.'''

    def get(self):
        self.request.charset = 'UTF-8'
        keyword = self.request.get('keyword')  # currently, keyword is unicode
        handler = GougouSearchHandler(keyword)

        self.response.headers['Content-Type'] = 'application/atom+xml'
        self.response.out.write(handler.getAtom())
        timestamp = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%SZ')
        logging.info('GougouSearchAtom-' + timestamp)
        
class YoukuSearchAtom(webapp.RequestHandler):
    '''Serve an ATOM feed of youku.com search results for ?keyword=.'''

    def get(self):
        self.request.charset = 'UTF-8'
        keyword = self.request.get('keyword')
        handler = YoukuSearchHandler(keyword)

        self.response.headers['Content-Type'] = 'application/atom+xml'
        self.response.out.write(handler.getAtom())
        timestamp = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%SZ')
        logging.info('YoukuSearchAtom-' + timestamp)

class DabagirlAtom(webapp.RequestHandler):
    '''Serve the dabagirl.co.kr product-list ATOM feed.'''

    def get(self):
        handler = DabagirlHandler()
        self.response.out.write(handler.getAtom())
        timestamp = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%SZ')
        logging.info('DabagirlAtom-' + timestamp)

class CherrykokoAtom(webapp.RequestHandler):
    '''Serve the cherrykoko.com product-list ATOM feed.'''

    def get(self):
        handler = CherrykokoHandler()
        self.response.out.write(handler.getAtom())
        timestamp = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%SZ')
        logging.info('CherrykokoAtom-' + timestamp)
            
class StyleonmeAtom(webapp.RequestHandler):
    '''Serve the styleonme.com product-grid ATOM feed.'''

    def get(self):
        handler = StyleonmeHandler()
        self.response.out.write(handler.getAtom())
        timestamp = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%SZ')
        logging.info('StyleonmeAtom-' + timestamp)

class UrlRedirect(webapp.RequestHandler):
    '''Resolve a short permalink index (see BaseHtmlHandler.genPermalink)
    back to its original URL and redirect there.'''

    def get(self, index):
        # The route pattern may pass an empty index; treat it as not found
        # instead of letting int('') raise and produce a 500 error page.
        if not index:
            self.error(404)
            return
        url_mapping = UrlMap.gql('WHERE index = :1', int(index)).fetch(1)
        if url_mapping:
            self.redirect(url_mapping[0].url)
        else:
            # Bug fix: an unknown index is a client-side not-found (404),
            # not a server error (the old code returned 500).
            self.error(404)

def main():
    '''Wire up the URL routes and run the WSGI application under CGI.'''
    application = webapp.WSGIApplication([
            ('/rsstool/gougoulatestfilm.spider', GougouLatestFilmSpider),
            ('/rsstool/gougoulatestfilm.atom', GougouLatestFilmAtom),
            ('/rsstool/gougousearch.atom', GougouSearchAtom),
            ('/rsstool/youkusearch.atom', YoukuSearchAtom),
            ('/rsstool/dabagirl.atom', DabagirlAtom),
            ('/rsstool/cherrykoko.atom', CherrykokoAtom),
            ('/rsstool/styleonme.atom', StyleonmeAtom),
            # \d+ (was \d*): require at least one digit so an empty index
            # 404s at routing time instead of reaching UrlRedirect with ''.
            (r'/rsstool/urlmapping/(\d+)', UrlRedirect),
            ('/rsstool/', MainHandler),
        ],
        debug=True)
    wsgiref.handlers.CGIHandler().run(application)

if __name__ == '__main__':
    main()
