﻿from google.appengine.ext import webapp, db
from google.appengine.ext.webapp.util import run_wsgi_app 
from google.appengine.api import urlfetch
from base_request import *
import datetime, html5lib, BeautifulSoup, time
from google.appengine.api import *
from google.appengine.api import taskqueue
from itertools import *
import feedparser
from model import *
import urlparse



def debug_mode():
  """Return True when running under the local development server."""
  server = os.environ.get('SERVER_SOFTWARE', '')
  return server.lower().startswith('devel')
# Minimum age before a crawled page counts as stale (see FeedHandler / cleanup).
UPDATE_INTERVAL = datetime.timedelta(hours = 2)  
# Absolute base URL used when rendering links in templates and feeds.
HOST = 'http://localhost:8080' if debug_mode() else 'http://phuntid.appspot.com'

class MainHandler(BaseRequestHandler):
  """Front page: lists every known site, most recently updated first."""
  def get(self):
    sites = Site.all().order('-last_updated').fetch(1000)
    context = {'siteview': True, 'sites': sites}
    self.response.out.write(template.render('index.html', context))
class ViewHandler(BaseRequestHandler):
  """Show one page of pictures for a site, with prev/next paging links."""
  def get(self, key=None, offset=0):
    # offset may arrive as a string when captured from the URL/query;
    # normalize so the +1/-1 arithmetic and the datastore offset get an int.
    offset = int(offset or 0)
    s = Site.get(key)
    # Fetch two rows so we can tell whether a "next" page exists.
    p = (ParsedPage.all()
           .filter('site =', s)
           .filter('has_pics =', True)
           .order('-last_updated')
           .fetch(2, offset=offset))
    self.response.out.write(template.render('index.html', {
        'host': HOST,
        'site': s,
        'pics': p[0].pics if p else '',
        'pp': p[0] if p else None,
        'next': str(offset + 1) if len(p) > 1 else None,
        'prev': str(offset - 1) if offset != 0 else None}))

class AddHandler(BaseRequestHandler):
  """Register a site by URL and queue an initial crawl.

  Fix: the original tested for a blank URL AFTER prepending 'http://',
  so an empty submission became the junk site 'http://' and was always
  stored.  Reject blank input before normalizing.
  """
  def post(self, url='', crawl=''):
    url = url.strip()
    if url:
      if not re.match('http://', url):
        url = 'http://' + url
      # Sites are keyed by 'k:' + URL so re-adding is idempotent.
      s = Site.get_by_key_name('k:' + url)
      if s is None:
        s = Site(key_name='k:' + url)
      s.crawl = bool(crawl)
      s.put()
      logging.debug('Added: ' + url)
      taskqueue.add(url='/update', params={'key': s.key()})
    self.seeother('/')

class DeleteHandler(BaseRequestHandler):
  """Delete a site and queue removal of its parsed pages."""
  def get(self, key):
    site = Site.get(key)
    if site is not None:
      # Page deletion is batched asynchronously by DeletePP.
      taskqueue.add(url='/delete_pp', params={'key': site.key()})
      db.delete(site)
    self.seeother('/')

class DeletePP(TaskRequestHandler):
  """Task: delete a site's ParsedPage entities in batches of 100."""
  def get(self, key):
    self.post(key)

  def post(self, key):
    batch = ParsedPage.all().filter('site =', db.Key(key)).fetch(100)
    if batch:
      db.delete(batch)
      # Re-enqueue until no pages remain for this site.
      taskqueue.add(url='/delete_pp', params={'key': key})
      
    
class UpdateAllHandler(TaskRequestHandler):
  """Fan out one /update task for every known site."""
  def post(self):
    # keys_only keeps the query cheap; the task only needs the key.
    for site_key in Site.all(keys_only=True).fetch(1000):
      taskqueue.add(url='/update', params={'key': site_key})

  def get(self):
    self.post()
  
class UpdateHandler(TaskRequestHandler): 
  """Crawl task for one site.

  Called without 'url' it re-saves the Site and enqueues a fetch of the
  site's main page.  Called with 'url' it fetches that page, extracts
  linked .jpg pictures, and -- for the main page of a site with
  crawl=True -- fans out tasks for outgoing links not seen before.
  """
  def get(self,key,url = None):    
    self.post(key,url)
    
  def post(self,key,url = None):        
    logging.debug(key)
    site = Site.get(key)
    if site is None:
      logging.warn('Key does not exist: '+key)
      return
    if url is None:
      # Stage 1: re-save the site (presumably bumps an auto_now
      # last_updated marking the start of this round -- confirm in
      # model) and schedule the actual fetch of the main page.
      site.put()
      logging.debug('Scheduling to suck the main page!')
      taskqueue.add(url='/update',params = {'key':key,'url' : site.url })
      return
    
    logging.debug('trying to suck: '+ url)
    parsedpage = ParsedPage.get_by_key_name('k:'+url)   
    if parsedpage is None:
      # First time this URL is seen; stamp with the site's round marker.
      parsedpage = ParsedPage(key_name = 'k:'+url, url = url, last_updated = site.last_updated, site = site)
    elif url != site.url:
      # Known non-main page: just re-save it (keeping it alive for the
      # cleanup job, which deletes pages older than the site) and skip
      # the re-fetch entirely.
      parsedpage.put()
      logging.debug('This page has been sucked already.')
      return
    if site.last_updated < parsedpage.last_updated:
      # Already fetched since the current round started.
      logging.debug('this link was already sucked in this round.')
      return
    try:      
      soup = html5lib.HTMLParser(tree=html5lib.treebuilders.getTreeBuilder("beautifulsoup")).parse(fetch_html(parsedpage))     
    except PageNotModified:
      return
    except Exception,e:
      # Best-effort crawl: any parse/fetch failure just drops the page.
      logging.debug('ERROR: '+str(e))
      return
    logging.debug('done: '+ url)
    
    to_put = [parsedpage]   


    
    pics = []
    # Collect every <a href="*.jpg"> anchor that wraps at least one <img>.
    for a in soup.findAll(href = re.compile('\.jpg$')):      
      link, imgs = None, []      
      try:
        link = urlparse.urljoin(parsedpage.url,a['href'])
      except Exception, e:
        continue
      for img in a.findAll('img'):
        try:
          imgs.append(urlparse.urljoin(parsedpage.url,img['src']))
        except Exception, e:
          pass
      
     
      
      # Keep only absolute http: links that actually contain thumbnails.
      if link[:5] == 'http:' and len(imgs) > 0:
        pics.append('<a href="'+link+'" target=_blank>'+''.join('<img src="'+src+'" alt="img plz!" />' for src in imgs) +'</a>')
        
        
    # Sorted so that identical pic sets produce identical hashes below.
    parsedpage.pics = ''.join(sorted(pics))
    parsedpage.has_pics = len(pics) > 0
    
    import hashlib
    # Fingerprint of the pic set, scoped to the site, for de-duplication.
    parsedpage.hashp = hashlib.sha256(site.url + parsedpage.pics).hexdigest()
    logging.debug(str(len(pics)) + ' pics were extracted')
    if parsedpage.has_pics and ParsedPage.all().filter('hashp =',parsedpage.hashp).get() is not None:
      # Another stored page already carries this exact pic set; drop ours.
      parsedpage.pics = ''
      parsedpage.has_pics = False
      logging.debug('These pics are already found on another page!')
      
    
    if url == site.url and site.crawl is True:
      # Main page of a crawlable site: fan out over its absolute outlinks.
      cnt = 0
      links = filter(lambda x: re.match('http://',x), 
										 set(urlparse.urljoin(site.url,a['href']) 
											 for a in soup.findAll(href = True)))
			

      # Batch-resolve all link keys, then pair each link with its entity.
      for link,pp in izip(links,ParsedPage.get_by_key_name(['k:'+l for l in links])):
          if pp is None:
            logging.debug('adding to queue: '+link)
            # Stagger new tasks two seconds apart to spread the load.
            cnt += 2
          
            taskqueue.add(url='/update',params = {'key':key,'url' : link}, countdown = cnt)
          else:
            # Known link: re-save so the cleanup job keeps it alive.
            to_put.append(pp)
            #logging.debug('this link was already sucked: '+link+' Pics:' + str(len(pp.pics)))
      logging.debug('there are ' + str(len(to_put)-1) + ' live links.')
    db.put(to_put)
     
     
def fetch_html(parsedpage):  
  """Fetch parsedpage.url with a conditional GET.

  Mutates parsedpage in place: updates etag, last_modified, expires,
  and url (when urlfetch followed a redirect).  Returns the response
  body, or '' when the fetch itself fails.  Raises PageNotModified on
  an HTTP 304 or when the cache validators match the stored ones.
  """
  '''
    try:
      if not can_fetch(parsedpage.url):      
        raise ForbiddenByRobotsTxt
    except urlfetch.Error, e:
      logging.warn('Cannot fetch robots.txt. '+str(e) + ': ' + parsedpage.url)   
  '''    
  
  # Send the validators stored from the previous fetch so unchanged
  # pages can answer 304.
  headers = {'Accept-encoding': 'gzip, deflate'}
  if parsedpage.etag:
    headers['If-None-Match'] = parsedpage.etag
  if parsedpage.last_modified:
    headers['If-Modified-Since'] = parsedpage.last_modified
  try:
    r = urlfetch.fetch(parsedpage.url, headers = headers)
  except urlfetch.Error, e:
    logging.warn('Urlfetch failed for URL: '+ parsedpage.url+'\n\nException:\n'+str(e))    
    return ''
    
  
  etag=r.headers.get('ETag', None)    
  last_modified=r.headers.get('Last-Modified', None)
  
  # Unchanged if the server says 304, or if it returned the same
  # validators we already have stored.
  if (r.status_code == 304 or 
      (etag is not None and etag == parsedpage.etag) or
      (last_modified is not None and last_modified == parsedpage.last_modified)):
    #logging.debug('Not modified')
    raise PageNotModified
  
  if r.final_url is not None:
    # urlfetch followed a redirect; remember the final destination.
    parsedpage.url = r.final_url
  parsedpage.etag = etag
  parsedpage.last_modified = last_modified    
  
  # Default expiry: one hour from now.
  expires = datetime.datetime.now() + datetime.timedelta(hours = 1)
  if 'Expires' in r.headers:
    try:
      # NOTE(review): feedparser._parse_date is a private API.  Also,
      # max() forces the expiry to at least a day out, overriding any
      # shorter server-sent Expires -- min() may have been intended.
      # Confirm against how parsedpage.expires is consumed elsewhere.
      expires = max( datetime.datetime.fromtimestamp(time.mktime(feedparser._parse_date(r.headers['Expires']))),
                   datetime.datetime.now() + datetime.timedelta(days = 1))  
    except Exception,e:
      # Unparseable Expires header: keep the one-hour default.
      pass
  parsedpage.expires = expires
  return r.content
   
    
   
class FeedHandler(TaskRequestHandler):
  """Render an RSS 2.0 feed of a site's recently found pictures.

  Supports conditional GET (Last-Modified / If-Modified-Since) and
  answers 410 with a redirect stub when the site no longer exists.
  Fix: the item body emitted malformed markup '<br \\>' (literal
  backslash); corrected to '<br />'.
  """
  def get(self, key, items=200):
    def escape(text):
      # Normalize the markup through BeautifulSoup, then entity-escape
      # it for safe embedding inside the XML template.
      import cgi
      if text is None:
        return ''
      return cgi.escape(unicode(BeautifulSoup.BeautifulSoup(text)))

    def format_date_rfc_822(date):
      # RFC 822 date for RSS; fall back to GMT for naive datetimes.
      zone = date.strftime("%Z")
      if not zone:
        zone = "GMT"
      return date.strftime("%a, %d %b %Y %H:%M:%S ") + zone

    site = Site.get(key)
    if site is None:
      # Tell well-behaved aggregators the feed is permanently gone.
      self.error(410)
      self.response.out.write('<?xml version="1.0"?><redirect><newLocation/></redirect>')
      return

    self.response.headers['Last-Modified'] = format_date_rfc_822(site.last_updated)
    if 'If-Modified-Since' in self.request.headers:
      # Compare at whole-second resolution: the RFC 822 header carries
      # no microseconds, so strip them from last_updated first.
      since = datetime.datetime.fromtimestamp(
          time.mktime(feedparser._parse_date(self.request.headers['If-Modified-Since'])))
      if since >= site.last_updated - datetime.timedelta(microseconds=site.last_updated.microsecond):
        self.error(304)
        return

    self.response.headers['Content-Type'] = 'application/xml; charset=utf-8'

    # Prefer pages found in (roughly) the current update round ...
    entries = ParsedPage.all().filter('site =', site).filter('has_pics =', True).filter(
        'last_updated >', site.last_updated - UPDATE_INTERVAL / 2).fetch(100)
    # ... but pad out thin feeds with the most recent pages overall.
    if len(entries) < 20:
      entries = ParsedPage.all().filter('site =', site).filter('has_pics =', True).order(
          '-last_updated').fetch(20)

    entries = sorted(entries, key=lambda pp: pp.last_updated, reverse=True)
    self.response.out.write(template.render('rss2.xml', {
        'host': HOST,
        'site': {'key': site.key(),
                 'url': site.url,
                 'last_updated': format_date_rfc_822(site.last_updated)},
        'entries': [{'last_updated': format_date_rfc_822(pp.last_updated),
                     'title': escape(pp.url),
                     # Key names are 'k:' + URL; strip the prefix back off.
                     'url': escape(pp.key().name()[2:]),
                     'escaped_text': escape(pp.pics +
                        '<br />source: ' + pp.url + '<br />from ' + site.url)
                    } for pp in entries]}))


                                          
class Dummy(TaskRequestHandler):
  """Diagnostic endpoint: logs the request's cookies and headers."""
  def get(self):
    for item in (self.request.cookies, self.request.headers):
      logging.debug(item)

  def post(self):
    self.get()
class CleanUpHandler(TaskRequestHandler):
  """Delete ParsedPages older than their site's last crawl round."""
  def get(self, key=None):
    if key is None:
      # Fan-out mode: enqueue one cleanup task per site.
      for site in Site.all().fetch(200):
        taskqueue.add(url='/cleanup', params={'key': site.key()})
      return
    site = Site.get(key)
    stale = ParsedPage.all().filter('site =', site).filter(
        'last_updated <', site.last_updated).fetch(200)
    if stale:
      db.delete(stale)
      logging.debug('Deleting ' + str(len(stale)) + ' items from ' + site.url)
      # Re-enqueue until no stale pages remain.
      taskqueue.add(url='/cleanup', params={'key': key})

  def post(self):
    self.get()
                                          
def main():
  """Build the URL map and run the WSGI application."""
  routes = [('/', MainHandler),
            ('/view', ViewHandler),
            ('/update_all', UpdateAllHandler),
            ('/update', UpdateHandler),
            ('/add', AddHandler),
            ('/cleanup', CleanUpHandler),
            ('/delete', DeleteHandler),
            ('/delete_pp', DeletePP),
            ('/dummy', Dummy),
            ('/feed', FeedHandler)]
  run_wsgi_app(webapp.WSGIApplication(routes, debug=True))
  
 
# CGI-style entry point: run the app when invoked directly.
if __name__ == '__main__':
    main()
